From 49c72df527d484a7579b4d5d31735e6e8b5b5a9a Mon Sep 17 00:00:00 2001 From: mattBrzezinski Date: Sat, 3 Aug 2024 06:15:12 +0000 Subject: [PATCH] AWS API Definitions Updated --- src/AWSServices.jl | 3 +- src/services/acm.jl | 66 +- src/services/acm_pca.jl | 19 +- src/services/application_auto_scaling.jl | 326 +++-- src/services/application_signals.jl | 20 +- src/services/appstream.jl | 32 +- src/services/arc_zonal_shift.jl | 146 +- src/services/bedrock.jl | 212 ++- src/services/bedrock_agent.jl | 978 +++++++++++++- src/services/bedrock_agent_runtime.jl | 179 ++- src/services/bedrock_runtime.jl | 87 +- src/services/chime_sdk_media_pipelines.jl | 31 +- src/services/cleanrooms.jl | 838 +++++++++++- src/services/cloudfront.jl | 9 +- src/services/cloudhsm_v2.jl | 189 ++- src/services/codepipeline.jl | 130 ++ src/services/connect.jl | 517 +++++-- src/services/connect_contact_lens.jl | 2 +- src/services/controlcatalog.jl | 77 ++ src/services/controltower.jl | 87 +- src/services/datazone.jl | 426 ++++++ src/services/direct_connect.jl | 58 +- src/services/dynamodb.jl | 29 +- src/services/ec2.jl | 240 +++- src/services/ecr.jl | 220 ++- src/services/eks.jl | 9 + src/services/elastic_load_balancing_v2.jl | 89 +- src/services/elasticache.jl | 603 +++++---- src/services/entityresolution.jl | 40 +- src/services/firehose.jl | 4 + src/services/glue.jl | 2 + src/services/iotsitewise.jl | 45 +- src/services/ivs_realtime.jl | 149 ++- src/services/kinesis_analytics_v2.jl | 99 +- src/services/lex_models_v2.jl | 6 +- .../license_manager_linux_subscriptions.jl | 315 ++++- src/services/mediaconnect.jl | 3 + src/services/medialive.jl | 2 + src/services/medical_imaging.jl | 11 + src/services/memorydb.jl | 10 +- src/services/mobile.jl | 299 ----- src/services/mq.jl | 63 +- src/services/network_firewall.jl | 26 +- src/services/opensearch.jl | 2 + src/services/organizations.jl | 35 +- src/services/payment_cryptography_data.jl | 59 +- src/services/pi.jl | 3 +- 
src/services/pinpoint_sms_voice_v2.jl | 144 +- src/services/qapps.jl | 1189 +++++++++++++++++ src/services/qbusiness.jl | 4 + src/services/qconnect.jl | 205 +++ src/services/quicksight.jl | 120 ++ src/services/rds.jl | 122 +- src/services/redshift_serverless.jl | 12 +- src/services/rekognition.jl | 6 +- src/services/resiliencehub.jl | 217 ++- src/services/rolesanywhere.jl | 4 + src/services/s3.jl | 63 +- src/services/sagemaker.jl | 262 +++- src/services/secrets_manager.jl | 33 +- src/services/sfn.jl | 73 +- src/services/ssm_quicksetup.jl | 423 ++++++ src/services/timestream_query.jl | 4 +- src/services/tnb.jl | 38 +- src/services/workspaces.jl | 389 +++++- 65 files changed, 8500 insertions(+), 1603 deletions(-) delete mode 100644 src/services/mobile.jl create mode 100644 src/services/qapps.jl create mode 100644 src/services/ssm_quicksetup.jl diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 259e8f8066..77d8c5f655 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -558,7 +558,6 @@ const migrationhuborchestrator = AWS.RestJSONService( const migrationhubstrategy = AWS.RestJSONService( "migrationhub-strategy", "migrationhub-strategy", "2020-02-19" ) -const mobile = AWS.RestJSONService("AWSMobileHubService", "mobile", "2017-07-01") const mobile_analytics = AWS.RestJSONService( "mobileanalytics", "mobileanalytics", "2014-06-05" ) @@ -640,6 +639,7 @@ const privatenetworks = AWS.RestJSONService( "private-networks", "private-networks", "2021-12-03" ) const proton = AWS.JSONService("proton", "proton", "2020-07-20", "1.0", "AwsProton20200720") +const qapps = AWS.RestJSONService("qapps", "data.qapps", "2023-11-27") const qbusiness = AWS.RestJSONService("qbusiness", "qbusiness", "2023-11-27") const qconnect = AWS.RestJSONService("wisdom", "wisdom", "2020-10-19") const qldb = AWS.RestJSONService("qldb", "qldb", "2019-01-02") @@ -772,6 +772,7 @@ const ssm_contacts = AWS.JSONService( "ssm-contacts", "ssm-contacts", "2021-05-03", "1.1", "SSMContacts" ) const 
ssm_incidents = AWS.RestJSONService("ssm-incidents", "ssm-incidents", "2018-05-10") +const ssm_quicksetup = AWS.RestJSONService("ssm-quicksetup", "ssm-quicksetup", "2018-05-10") const ssm_sap = AWS.RestJSONService("ssm-sap", "ssm-sap", "2018-05-10") const sso = AWS.RestJSONService("awsssoportal", "portal.sso", "2019-06-10") const sso_admin = AWS.JSONService("sso", "sso", "2020-07-20", "1.1", "SWBExternalService") diff --git a/src/services/acm.jl b/src/services/acm.jl index 1e05ab2315..68d4b6e640 100644 --- a/src/services/acm.jl +++ b/src/services/acm.jl @@ -223,10 +223,12 @@ end get_certificate(certificate_arn) get_certificate(certificate_arn, params::Dict{String,<:Any}) -Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the -certificate of the issuing CA and the intermediate certificates of any other subordinate -CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the -certificates and inspect individual fields. +Retrieves a certificate and its certificate chain. The certificate may be either a public +or private certificate issued using the ACM RequestCertificate action, or a certificate +imported into ACM using the ImportCertificate action. The chain consists of the certificate +of the issuing CA and the intermediate certificates of any other subordinate CAs. All of +the certificates are base64 encoded. You can use OpenSSL to decode the certificates and +inspect individual fields. # Arguments - `certificate_arn`: String that contains a certificate ARN in the following format: @@ -271,23 +273,21 @@ ACM does not provide managed renewal for certificates that you import. Note the guidelines when importing third party certificates: You must enter the private key that matches the certificate you are importing. The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase. The private -key must be no larger than 5 KB (5,120 bytes). 
If the certificate you are importing is -not self-signed, you must enter its certificate chain. If a certificate chain is -included, the issuer must be the subject of one of the certificates in the chain. The -certificate, private key, and certificate chain must be PEM-encoded. The current time -must be between the Not Before and Not After certificate fields. The Issuer field must -not be empty. The OCSP authority URL, if present, must not exceed 1000 characters. To -import a new certificate, omit the CertificateArn argument. Include this argument only when -you want to replace a previously imported certificate. When you import a certificate by -using the CLI, you must specify the certificate, the certificate chain, and the private key -by their file names preceded by fileb://. For example, you can specify a certificate saved -in the C:temp folder as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP -or HTTPS Query request, include these arguments as BLOBs. When you import a certificate -by using an SDK, you must specify the certificate, the certificate chain, and the private -key files in the manner required by the programming language you're using. The -cryptographic algorithm of an imported certificate must match the algorithm of the signing -CA. For example, if the signing CA key type is RSA, then the certificate key type must also -be RSA. This operation returns the Amazon Resource Name (ARN) of the imported certificate. +key must be no larger than 5 KB (5,120 bytes). The certificate, private key, and +certificate chain must be PEM-encoded. The current time must be between the Not Before +and Not After certificate fields. The Issuer field must not be empty. The OCSP +authority URL, if present, must not exceed 1000 characters. To import a new certificate, +omit the CertificateArn argument. Include this argument only when you want to replace a +previously imported certificate. 
When you import a certificate by using the CLI, you must +specify the certificate, the certificate chain, and the private key by their file names +preceded by fileb://. For example, you can specify a certificate saved in the C:temp folder +as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP or HTTPS Query +request, include these arguments as BLOBs. When you import a certificate by using an +SDK, you must specify the certificate, the certificate chain, and the private key files in +the manner required by the programming language you're using. The cryptographic +algorithm of an imported certificate must match the algorithm of the signing CA. For +example, if the signing CA key type is RSA, then the certificate key type must also be RSA. + This operation returns the Amazon Resource Name (ARN) of the imported certificate. # Arguments - `certificate`: The certificate to import. @@ -335,10 +335,12 @@ end list_certificates() list_certificates(params::Dict{String,<:Any}) -Retrieves a list of certificate ARNs and domain names. You can request that only -certificates that match a specific status be listed. You can also filter by specific -attributes of the certificate. Default filtering returns only RSA_2048 certificates. For -more information, see Filters. +Retrieves a list of certificate ARNs and domain names. By default, the API returns RSA_2048 +certificates. To return all certificates in the account, include the keyType filter with +the values [RSA_1024, RSA_2048, RSA_3072, RSA_4096, EC_prime256v1, EC_secp384r1, +EC_secp521r1]. In addition to keyType, you can also filter by the CertificateStatuses, +keyUsage, and extendedKeyUsage attributes on the certificate. For more information, see +Filters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -592,10 +594,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys certificate uses to encrypt data. 
RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not - supported by all network clients. Some AWS services may require RSA keys, or only support - ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to - ensure that compatibility is not broken. Check the requirements for the AWS service where - you plan to deploy your certificate. Default: RSA_2048 + supported by all network clients. Some Amazon Web Services services may require RSA keys, + or only support ECDSA keys of a particular size, while others allow the use of either RSA + and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the + Amazon Web Services service where you plan to deploy your certificate. For more information + about selecting an algorithm, see Key algorithms. Algorithms supported for an ACM + certificate request include: RSA_2048 EC_prime256v1 EC_secp384r1 Other + listed algorithms are for imported certificates only. When you request a private PKI + certificate signed by a CA from Amazon Web Services Private CA, the specified signing + algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. + Default: RSA_2048 - `"Options"`: Currently, you can use this parameter to specify whether to add the certificate to a certificate transparency log. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. diff --git a/src/services/acm_pca.jl b/src/services/acm_pca.jl index c039eea54b..3767bc9303 100644 --- a/src/services/acm_pca.jl +++ b/src/services/acm_pca.jl @@ -702,7 +702,7 @@ a Policy for Cross-Account Access. # Arguments - `resource_arn`: The Amazon Resource Number (ARN) of the private CA that will have its policy retrieved. 
You can find the CA's ARN by calling the ListCertificateAuthorities - action. + action. </p> """ function get_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -754,15 +754,14 @@ certificate signed by the preceding subordinate CA must come next, and so on unt chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following -extensions to be marked critical in the imported CA certificate or chain. Authority key -identifier Basic constraints (must be marked critical) Certificate policies Extended -key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints -Policy mappings Subject alternative name Subject directory attributes Subject key -identifier Subject information access Amazon Web Services Private CA rejects the -following extensions when they are marked critical in an imported CA certificate or chain. - Authority information access CRL distribution points Freshest CRL Policy constraints - Amazon Web Services Private Certificate Authority will also reject any other extension -marked as critical not contained on the preceding list of allowed extensions. +extensions to be marked critical in the imported CA certificate or chain. Basic +constraints (must be marked critical) Subject alternative names Key usage Extended +key usage Authority key identifier Subject key identifier Issuer alternative name +Subject directory attributes Subject information access Certificate policies Policy +mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following +extensions when they are marked critical in an imported CA certificate or chain. Name +constraints Policy constraints CRL distribution points Authority information access +Freshest CRL Any other extension # Arguments - `certificate`: The PEM-encoded certificate for a private CA. 
This may be a self-signed diff --git a/src/services/application_auto_scaling.jl b/src/services/application_auto_scaling.jl index e50633819a..c57f3b09e8 100644 --- a/src/services/application_auto_scaling.jl +++ b/src/services/application_auto_scaling.jl @@ -19,7 +19,7 @@ scaling policy in the Application Auto Scaling User Guide. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -51,26 +51,28 @@ scaling policy in the Application Auto Scaling User Guide. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. 
Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. 
dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -85,8 +87,10 @@ scaling policy in the Application Auto Scaling User Guide. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -149,7 +153,7 @@ more information, see Delete a scheduled action in the Application Auto Scaling - `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. 
Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -181,26 +185,28 @@ more information, see Delete a scheduled action in the Application Auto Scaling 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. 
dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. 
comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -215,8 +221,10 @@ more information, see Delete a scheduled action in the Application Auto Scaling Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource @@ -282,7 +290,7 @@ with it. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -314,17 +322,19 @@ with it. 2e31-5. 
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -349,8 +359,10 @@ with it. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. 
sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -421,7 +433,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceIds"`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -453,18 +465,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. 
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -489,8 +503,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. 
sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scalable_targets( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -549,7 +565,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -581,20 +597,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. 
+ inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -616,8 +634,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. 
neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_activities( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -672,7 +692,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -704,20 +724,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. 
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. 
rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -739,8 +761,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_policies( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -775,8 +799,8 @@ end Describes the Application Auto Scaling scheduled actions for the specified service namespace. You can filter the results using the ResourceId, ScalableDimension, and -ScheduledActionNames parameters. For more information, see Scheduled scaling and Managing -scheduled scaling in the Application Auto Scaling User Guide. +ScheduledActionNames parameters. For more information, see Scheduled scaling in the +Application Auto Scaling User Guide. # Arguments - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -794,7 +818,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. 
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -826,20 +850,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. 
appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -861,8 +887,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `"ScheduledActionNames"`: The names of the scheduled actions to describe. """ function describe_scheduled_actions( @@ -897,8 +925,8 @@ end list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) Returns all the tags on the specified Application Auto Scaling scalable target. 
For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. # Arguments - `resource_arn`: Specify the ARN of the scalable target. For example: @@ -964,7 +992,7 @@ scaling policies that were specified for the scalable target are deleted. - `resource_id`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -996,26 +1024,28 @@ scaling policies that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. 
+ inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. 
dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1030,8 +1060,10 @@ scaling policies that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1116,7 +1148,7 @@ scheduled actions that were specified for the scalable target are deleted. 
- `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1148,26 +1180,28 @@ scheduled actions that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. 
elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. 
Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1182,8 +1216,10 @@ scheduled actions that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target. - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -1205,8 +1241,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys At and cron expressions use Universal Coordinated Time (UTC) by default. The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year]. For rate expressions, value is a positive integer and unit is minute - | minutes | hour | hours | day | days. 
For more information and examples, see Example - scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide. + | minutes | hour | hours | day | days. For more information, see Schedule recurring scaling + actions using cron expressions in the Application Auto Scaling User Guide. - `"StartTime"`: The date and time for this scheduled action to start, in UTC. - `"Timezone"`: Specifies the time zone used when setting a scheduled action by using an at or cron expression. If a time zone is not provided, UTC is used by default. Valid values @@ -1294,7 +1330,7 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. - `resource_id`: The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service - name. Example: service/default/sample-webapp. Spot Fleet - The resource type is + name. Example: service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1326,17 +1362,19 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. 
Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -1361,8 +1399,10 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. 
- `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1383,20 +1423,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys capacity limit in response to changing demand. This property is required when registering a new scalable target. For the following resources, the minimum value allowed is 0. AppStream 2.0 fleets Aurora DB clusters ECS services EMR clusters Lambda - provisioned concurrency SageMaker endpoint variants SageMaker Serverless endpoint - provisioned concurrency Spot Fleets custom resources It's strongly recommended that - you specify a value greater than 0. A value greater than 0 means that data points are - continuously reported to CloudWatch that scaling policies can use to scale on a metric like - average CPU utilization. For all other resources, the minimum allowed value depends on the - type of resource that you are using. If you provide a value that is lower than what a - resource can accept, an error occurs. In which case, the error message will provide the - minimum value that the resource can accept. + provisioned concurrency SageMaker endpoint variants SageMaker inference components + SageMaker serverless endpoint provisioned concurrency Spot Fleets custom resources + It's strongly recommended that you specify a value greater than 0. A value greater than 0 + means that data points are continuously reported to CloudWatch that scaling policies can + use to scale on a metric like average CPU utilization. For all other resources, the minimum + allowed value depends on the type of resource that you are using. If you provide a value + that is lower than what a resource can accept, an error occurs. In which case, the error + message will provide the minimum value that the resource can accept. 
- `"RoleARN"`: This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which - it creates if it does not yet exist. For more information, see Application Auto Scaling IAM - roles. + it creates if it does not yet exist. For more information, see How Application Auto Scaling + works with IAM. - `"SuspendedState"`: An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the @@ -1405,8 +1445,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are suspended. For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that - involve scheduled actions are suspended. For more information, see Suspending and - resuming scaling in the Application Auto Scaling User Guide. + involve scheduled actions are suspended. For more information, see Suspend and resume + scaling in the Application Auto Scaling User Guide. - `"Tags"`: Assigns one or more tags to the scalable target. Use this parameter to tag the scalable target when it is created. To tag an existing scalable target, use the TagResource operation. Each tag consists of a tag key and a tag value. Both the tag key and the tag @@ -1466,10 +1506,10 @@ tag key and a tag value. To edit a tag, specify an existing tag key and a new ta You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a scaling policy or scheduled action. 
You can also add tags to an Application Auto Scaling scalable target while creating it (RegisterScalableTarget). For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. Use tags to control access to a -scalable target. For more information, see Tagging support for Application Auto Scaling in -the Application Auto Scaling User Guide. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. Use tags to control access +to a scalable target. For more information, see Tagging support for Application Auto +Scaling in the Application Auto Scaling User Guide. # Arguments - `resource_arn`: Identifies the Application Auto Scaling scalable target that you want to @@ -1482,7 +1522,7 @@ the Application Auto Scaling User Guide. specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one. For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services - Billing and Cost Management User Guide. + Billing User Guide. """ function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/application_signals.jl b/src/services/application_signals.jl index 00ef2fb82c..9ce9f91eac 100644 --- a/src/services/application_signals.jl +++ b/src/services/application_signals.jl @@ -168,7 +168,8 @@ Returns information about a service discovered by Application Signals. # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested start time will be rounded to the nearest hour. 
- `key_attributes`: Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type @@ -180,6 +181,7 @@ Returns information about a service discovered by Application Signals. Environment specifies the location where this object is hosted, or what it belongs to. - `start_time`: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. """ function get_service( @@ -256,7 +258,8 @@ services. # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested end time will be rounded to the nearest hour. - `key_attributes`: Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type @@ -268,6 +271,7 @@ services. Environment specifies the location where this object is hosted, or what it belongs to. - `start_time`: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -325,7 +329,8 @@ are instrumented with CloudWatch RUM app monitors. # Arguments - `end_time`: The end of the time period to retrieve information about. 
When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested start time will be rounded to the nearest hour. - `key_attributes`: Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type @@ -337,6 +342,7 @@ are instrumented with CloudWatch RUM app monitors. Environment specifies the location where this object is hosted, or what it belongs to. - `start_time`: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -429,7 +435,8 @@ Signals. Only the operations that were invoked during the specified time range a # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested end time will be rounded to the nearest hour. - `key_attributes`: Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type @@ -441,6 +448,7 @@ Signals. Only the operations that were invoked during the specified time range a Environment specifies the location where this object is hosted, or what it belongs to. - `start_time`: The start of the time period to retrieve information about. 
When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -498,9 +506,11 @@ Services are discovered through Application Signals instrumentation. # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested start time will be rounded to the nearest hour. - `start_time`: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/appstream.jl b/src/services/appstream.jl index b14cd3d09e..8aa0db1e95 100644 --- a/src/services/appstream.jl +++ b/src/services/appstream.jl @@ -765,7 +765,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming - instance. Specify a value between 60 and 360000. + instance. Specify a value between 60 and 36000. - `"DisplayName"`: The fleet name to display. - `"DomainJoinInfo"`: The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. This is not allowed for Elastic fleets. 
@@ -793,13 +793,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. - Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this - feature, we recommend that you specify a value that corresponds exactly to a whole number - of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to - the nearest minute. For example, if you specify a value of 70, users are disconnected after - 1 minute of inactivity. If you specify a value that is at the midpoint between two - different minutes, the value is rounded up. For example, if you specify a value of 90, - users are disconnected after 2 minutes of inactivity. + Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable + this feature, we recommend that you specify a value that corresponds exactly to a whole + number of minutes (for example, 60, 120, and 180). If you don't do this, the value is + rounded to the nearest minute. For example, if you specify a value of 70, users are + disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint + between two different minutes, the value is rounded up. For example, if you specify a value + of 90, users are disconnected after 2 minutes of inactivity. - `"ImageArn"`: The ARN of the public, private, or shared image to use. - `"ImageName"`: The name of the image used to create the fleet. - `"MaxConcurrentSessions"`: The maximum concurrent sessions of the Elastic fleet. This is @@ -3147,7 +3147,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys active after users disconnect. 
If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming - instance. Specify a value between 60 and 360000. + instance. Specify a value between 60 and 36000. - `"DisplayName"`: The fleet name to display. - `"DomainJoinInfo"`: The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. @@ -3170,13 +3170,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. - Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this - feature, we recommend that you specify a value that corresponds exactly to a whole number - of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to - the nearest minute. For example, if you specify a value of 70, users are disconnected after - 1 minute of inactivity. If you specify a value that is at the midpoint between two - different minutes, the value is rounded up. For example, if you specify a value of 90, - users are disconnected after 2 minutes of inactivity. + Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable + this feature, we recommend that you specify a value that corresponds exactly to a whole + number of minutes (for example, 60, 120, and 180). If you don't do this, the value is + rounded to the nearest minute. For example, if you specify a value of 70, users are + disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint + between two different minutes, the value is rounded up. 
For example, if you specify a value + of 90, users are disconnected after 2 minutes of inactivity. - `"ImageArn"`: The ARN of the public, private, or shared image to use. - `"ImageName"`: The name of the image used to create the fleet. - `"InstanceType"`: The instance type to use when launching fleet instances. The following diff --git a/src/services/arc_zonal_shift.jl b/src/services/arc_zonal_shift.jl index 57a6c9300b..b038bb8081 100644 --- a/src/services/arc_zonal_shift.jl +++ b/src/services/arc_zonal_shift.jl @@ -48,8 +48,11 @@ autoshift. A practice run configuration includes specifications for blocked date blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from -starting. For more information, see Considerations when you configure zonal autoshift in -the Amazon Route 53 Application Recovery Controller Developer Guide. +starting. When a resource has a practice run configuration, Route 53 ARC starts zonal +shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you +to ensure that shifting away traffic from an Availability Zone during an autoshift is safe +for your application. For more information, see Considerations when you configure zonal +autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide. # Arguments - `outcome_alarms`: The outcome alarm for practice runs is a required Amazon CloudWatch @@ -58,10 +61,10 @@ the Amazon Route 53 Application Recovery Controller Developer Guide. from an Availability Zone during each weekly practice run. You should configure the alarm to go into an ALARM state if your application is impacted by the zonal shift, and you want to stop the zonal shift, to let traffic for the resource return to the Availability Zone. 
-- `resource_identifier`: The identifier of the resource to shift away traffic for when a - practice run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the - resource. At this time, supported resources are Network Load Balancers and Application Load - Balancers with cross-zone load balancing turned off. +- `resource_identifier`: The identifier of the resource that Amazon Web Services shifts + traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name + (ARN) for the resource. At this time, supported resources are Network Load Balancers and + Application Load Balancers with cross-zone load balancing turned off. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -158,6 +161,42 @@ function delete_practice_run_configuration( ) end +""" + get_autoshift_observer_notification_status() + get_autoshift_observer_notification_status(params::Dict{String,<:Any}) + +Returns the status of autoshift observer notification. Autoshift observer notification +enables you to be notified, through Amazon EventBridge, when there is an autoshift event +for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events +when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, +Route 53 ARC includes only autoshift events for autoshifts when one or more of your +resources is included in the autoshift. For more information, see Notifications for +practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller +Developer Guide. 
+ +""" +function get_autoshift_observer_notification_status(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "GET", + "/autoshift-observer-notification"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_autoshift_observer_notification_status( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "GET", + "/autoshift-observer-notification", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_managed_resource(resource_identifier) get_managed_resource(resource_identifier, params::Dict{String,<:Any}) @@ -170,10 +209,10 @@ start a zonal shift or configure zonal autoshift for Network Load Balancers and Load Balancers with cross-zone load balancing turned off. # Arguments -- `resource_identifier`: The identifier for the resource to shift away traffic for. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported - resources are Network Load Balancers and Application Load Balancers with cross-zone load - balancing turned off. +- `resource_identifier`: The identifier for the resource that Amazon Web Services shifts + traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this + time, supported resources are Network Load Balancers and Application Load Balancers with + cross-zone load balancing turned off. """ function get_managed_resource( @@ -204,7 +243,9 @@ end list_autoshifts() list_autoshifts(params::Dict{String,<:Any}) -Returns the active autoshifts for a specified resource. +Returns a list of autoshifts for an Amazon Web Services Region. By default, the call +returns only ACTIVE autoshifts. Optionally, you can specify the status parameter to return +COMPLETED autoshifts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -269,9 +310,9 @@ end Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. -ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts -that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation -does not list autoshifts. For more information about listing autoshifts, see +ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal +shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts +operation does not list autoshifts. For more information about listing autoshifts, see \">ListAutoshifts. # Optional Parameters @@ -324,9 +365,10 @@ Availability Zone to complete. For more information, see Zonal shift in the Amaz Application Recovery Controller Developer Guide. # Arguments -- `away_from`: The Availability Zone that traffic is moved away from for a resource when - you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the - resource is instead moved to other Availability Zones in the Amazon Web Services Region. +- `away_from`: The Availability Zone (for example, use1-az1) that traffic is moved away + from for a resource when you start a zonal shift. Until the zonal shift expires or you + cancel it, traffic for the resource is instead moved to other Availability Zones in the + Amazon Web Services Region. - `comment`: A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string. @@ -340,10 +382,10 @@ Application Recovery Controller Developer Guide. A lowercase letter m: To specify that the value is in minutes. A lowercase letter h: To specify that the value is in hours. For example: 20h means the zonal shift expires in 20 hours. 
120m means the zonal shift expires in 120 minutes (2 hours). -- `resource_identifier`: The identifier for the resource to shift away traffic for. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported - resources are Network Load Balancers and Application Load Balancers with cross-zone load - balancing turned off. +- `resource_identifier`: The identifier for the resource that Amazon Web Services shifts + traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this + time, supported resources are Network Load Balancers and Application Load Balancers with + cross-zone load balancing turned off. """ function start_zonal_shift( @@ -394,6 +436,50 @@ function start_zonal_shift( ) end +""" + update_autoshift_observer_notification_status(status) + update_autoshift_observer_notification_status(status, params::Dict{String,<:Any}) + +Update the status of autoshift observer notification. Autoshift observer notification +enables you to be notified, through Amazon EventBridge, when there is an autoshift event +for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events +when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, +Route 53 ARC includes only autoshift events for autoshifts when one or more of your +resources is included in the autoshift. For more information, see Notifications for +practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller +Developer Guide. + +# Arguments +- `status`: The status to set for autoshift observer notification. If the status is + ENABLED, Route 53 ARC includes all autoshift events when you use the Amazon EventBridge + pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only + autoshift events for autoshifts when one or more of your resources is included in the + autoshift. 
+ +""" +function update_autoshift_observer_notification_status( + status; aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "PUT", + "/autoshift-observer-notification", + Dict{String,Any}("status" => status); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_autoshift_observer_notification_status( + status, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "PUT", + "/autoshift-observer-notification", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_practice_run_configuration(resource_identifier) update_practice_run_configuration(resource_identifier, params::Dict{String,<:Any}) @@ -457,17 +543,25 @@ end update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status) update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status, params::Dict{String,<:Any}) -You can update the zonal autoshift status for a resource, to enable or disable zonal -autoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away resource -traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that -there's an issue in the Availability Zone that could potentially affect customers. +The zonal autoshift configuration for a resource includes the practice run configuration +and the status for running autoshifts, zonal autoshift status. When a resource has a +practice run configuation, Route 53 ARC starts weekly zonal shifts for the resource, to +shift traffic away from an Availability Zone. Weekly practice runs help you to make sure +that your application can continue to operate normally with the loss of one Availability +Zone. You can update the zonal autoshift autoshift status to enable or disable zonal +autoshift. 
When zonal autoshift is ENABLED, you authorize Amazon Web Services to shift away +resource traffic for an application from an Availability Zone during events, on your +behalf, to help reduce time to recovery. Traffic is also shifted away for the required +weekly practice runs. # Arguments - `resource_identifier`: The identifier for the resource that you want to update the zonal autoshift configuration for. The identifier is the Amazon Resource Name (ARN) for the resource. - `zonal_autoshift_status`: The zonal autoshift status for the resource that you want to - update the zonal autoshift configuration for. + update the zonal autoshift configuration for. Choose ENABLED to authorize Amazon Web + Services to shift away resource traffic for an application from an Availability Zone during + events, on your behalf, to help reduce time to recovery. """ function update_zonal_autoshift_configuration( diff --git a/src/services/bedrock.jl b/src/services/bedrock.jl index d4d5f6aad1..0dfe38ab01 100644 --- a/src/services/bedrock.jl +++ b/src/services/bedrock.jl @@ -10,7 +10,7 @@ using AWS.UUIDs API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and model evaluation jobs that use human workers. To learn more about the requirements for -creating a model evaluation job see, Model evaluations. +creating a model evaluation job see, Model evaluation. # Arguments - `evaluation_config`: Specifies whether the model evaluation job is automatic or uses @@ -98,22 +98,20 @@ end create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name) create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name, params::Dict{String,<:Any}) -Creates a guardrail to block topics and to filter out harmful content. Specify a name and -optional description. Specify messages for when the guardrail successfully blocks a -prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. 
- Specify topics for the guardrail to deny in the topicPolicyConfig object. Each -GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name -and description so that the guardrail can properly identify the topic. Specify DENY in -the type field. (Optional) Provide up to five prompts that you would categorize as -belonging to the topic in the examples list. Specify filter strengths for the harmful -categories defined in Amazon Bedrock in the contentPolicyConfig object. Each -GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful -category. For more information, see Content filters. For more information about the fields -in a content filter, see GuardrailContentFilterConfig. Specify the category in the type -field. Specify the strength of the filter for prompts in the inputStrength field and for -model responses in the strength field of the GuardrailContentFilterConfig. (Optional) -For security, include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any -tags to the guardrail in the tags object. For more information, see Tag resources. +Creates a guardrail to block topics and to implement safeguards for your generative AI +applications. You can configure the following policies in a guardrail to avoid undesirable +and harmful content, filter out denied topics and words, and remove sensitive information +for privacy protection. Content filters - Adjust filter strengths to block input prompts +or model responses containing harmful content. Denied topics - Define a set of topics +that are undesirable in the context of your application. These topics will be blocked if +detected in user queries or model responses. Word filters - Configure filters to block +undesirable words, phrases, and profanity. Such words can include offensive terms, +competitor names etc. 
Sensitive information filters - Block or mask sensitive +information such as personally identifiable information (PII) or custom regex in user +inputs and model responses. In addition to the above policies, you can also configure the +messages to be returned to the user if a user input or model response is in violation of +the policies defined in the guardrail. For more information, see Guardrails for Amazon +Bedrock in the Amazon Bedrock User Guide. # Arguments - `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. @@ -128,6 +126,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency in the Amazon S3 User Guide. - `"contentPolicyConfig"`: The content filter policies to configure for the guardrail. +- `"contextualGroundingPolicyConfig"`: The contextual grounding policy configuration used + to create a guardrail. - `"description"`: A description of the guardrail. - `"kmsKeyId"`: The ARN of the KMS key that you use to encrypt the guardrail. - `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for @@ -191,7 +191,8 @@ you are satisfied with a configuration, or to compare the configuration with ano version. # Arguments -- `guardrail_identifier`: The unique identifier of the guardrail. +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -230,6 +231,67 @@ function create_guardrail_version( ) end +""" + create_model_copy_job(source_model_arn, target_model_name) + create_model_copy_job(source_model_arn, target_model_name, params::Dict{String,<:Any}) + +Copies a model to another region so that it can be used there. For more information, see +Copy models to be used in other regions in the Amazon Bedrock User Guide. 
+ +# Arguments +- `source_model_arn`: The Amazon Resource Name (ARN) of the model to be copied. +- `target_model_name`: A name for the copied model. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than one time. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency. +- `"modelKmsKeyId"`: The ARN of the KMS key that you use to encrypt the model copy. +- `"targetModelTags"`: Tags to associate with the target model. For more information, see + Tag resources in the Amazon Bedrock User Guide. +""" +function create_model_copy_job( + sourceModelArn, targetModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/model-copy-jobs", + Dict{String,Any}( + "sourceModelArn" => sourceModelArn, + "targetModelName" => targetModelName, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_model_copy_job( + sourceModelArn, + targetModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-copy-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sourceModelArn" => sourceModelArn, + "targetModelName" => targetModelName, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_model_customization_job(base_model_identifier, custom_model_name, hyper_parameters, job_name, output_data_config, role_arn, training_data_config) create_model_customization_job(base_model_identifier, custom_model_name, hyper_parameters, job_name, output_data_config, role_arn, training_data_config, params::Dict{String,<:Any}) @@ 
-461,7 +523,8 @@ guardrailIdentifier field. If you delete a guardrail, all of its versions will b guardrailIdentifier field and the version in the guardrailVersion field. # Arguments -- `guardrail_identifier`: The unique identifier of the guardrail. +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -597,7 +660,7 @@ end get_evaluation_job(job_identifier, params::Dict{String,<:Any}) Retrieves the properties associated with a model evaluation job, including the status of -the job. For more information, see Model evaluations. +the job. For more information, see Model evaluation. # Arguments - `job_identifier`: The Amazon Resource Name (ARN) of the model evaluation job. @@ -670,6 +733,7 @@ details for the DRAFT version. # Arguments - `guardrail_identifier`: The unique identifier of the guardrail for which to get details. + This can be an ID or the ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -700,6 +764,37 @@ function get_guardrail( ) end +""" + get_model_copy_job(job_arn) + get_model_copy_job(job_arn, params::Dict{String,<:Any}) + +Retrieves information about a model copy job. For more information, see Copy models to be +used in other regions in the Amazon Bedrock User Guide. + +# Arguments +- `job_arn`: The Amazon Resource Name (ARN) of the model copy job. 
+ +""" +function get_model_copy_job(jobArn; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", + "/model-copy-jobs/$(jobArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_model_copy_job( + jobArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/model-copy-jobs/$(jobArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_model_customization_job(job_identifier) get_model_customization_job(job_identifier, params::Dict{String,<:Any}) @@ -816,10 +911,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"creationTimeBefore"`: Return custom models created before the specified time. - `"foundationModelArnEquals"`: Return custom models only if the foundation model Amazon Resource Name (ARN) matches this parameter. -- `"maxResults"`: Maximum number of results to return in the response. +- `"isOwned"`: Return custom models depending on if the current account owns them (true) or + if they were shared with the current account (false). +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. - `"nameContains"`: Return custom models only if the job name contains these characters. -- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list - the next set of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. - `"sortBy"`: The field to sort by in the returned list of models. - `"sortOrder"`: The sort order of the results. 
""" @@ -924,7 +1024,8 @@ another ListGuardrails request to see the next batch of results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"guardrailIdentifier"`: The unique identifier of the guardrail. +- `"guardrailIdentifier"`: The unique identifier of the guardrail. This can be an ID or the + ARN. - `"maxResults"`: The maximum number of results to return in the response. - `"nextToken"`: If there are more results than were returned in the response, the response returns a nextToken that you can send in another ListGuardrails request to see the next @@ -943,6 +1044,52 @@ function list_guardrails( ) end +""" + list_model_copy_jobs() + list_model_copy_jobs(params::Dict{String,<:Any}) + +Returns a list of model copy jobs that you have submitted. You can filter the jobs to +return based on one or more criteria. For more information, see Copy models to be used in +other regions in the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creationTimeAfter"`: Filters for model copy jobs created after the specified time. +- `"creationTimeBefore"`: Filters for model copy jobs created before the specified time. +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +- `"outputModelNameContains"`: Filters for model copy jobs in which the name of the copied + model contains the string that you specify. +- `"sortBy"`: The field to sort by in the returned list of model copy jobs. 
+- `"sortOrder"`: Specifies whether to sort the results in ascending or descending order. +- `"sourceAccountEquals"`: Filters for model copy jobs in which the account that the source + model belongs to is equal to the value that you specify. +- `"sourceModelArnEquals"`: Filters for model copy jobs in which the Amazon Resource Name + (ARN) of the source model to is equal to the value that you specify. +- `"statusEquals"`: Filters for model copy jobs whose status matches the value that you + specify. +""" +function list_model_copy_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/model-copy-jobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_model_copy_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/model-copy-jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_model_customization_jobs() list_model_customization_jobs(params::Dict{String,<:Any}) @@ -955,11 +1102,14 @@ Amazon Bedrock User Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"creationTimeAfter"`: Return customization jobs created after the specified time. - `"creationTimeBefore"`: Return customization jobs created before the specified time. -- `"maxResults"`: Maximum number of results to return in the response. +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. - `"nameContains"`: Return customization jobs only if the job name contains these characters. -- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list - the next set of results. 
+- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. - `"sortBy"`: The field to sort by in the returned list of jobs. - `"sortOrder"`: The sort order of the results. - `"statusEquals"`: Return customization jobs with the specified status. @@ -1282,19 +1432,21 @@ filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig. Specify the category in the type field. Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig. (Optional) For security, -include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any tags to the -guardrail in the tags object. For more information, see Tag resources. +include the ARN of a KMS key in the kmsKeyId field. # Arguments - `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. - `blocked_outputs_messaging`: The message to return when the guardrail blocks a model response. -- `guardrail_identifier`: The unique identifier of the guardrail +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. - `name`: A name for the guardrail. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"contentPolicyConfig"`: The content policy to configure for the guardrail. +- `"contextualGroundingPolicyConfig"`: The contextual grounding policy configuration used + to update a guardrail. - `"description"`: A description of the guardrail. - `"kmsKeyId"`: The ARN of the KMS key with which to encrypt the guardrail. 
- `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for diff --git a/src/services/bedrock_agent.jl b/src/services/bedrock_agent.jl index 5e087e7dd7..4276725649 100644 --- a/src/services/bedrock_agent.jl +++ b/src/services/bedrock_agent.jl @@ -79,8 +79,10 @@ Resource Name (ARN) of the role with permissions to invoke API operations on an (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time -expires, the subsequent InvokeAgent request begins a new session. To override the -default prompt behavior for agent orchestration and to use advanced prompts, include a +expires, the subsequent InvokeAgent request begins a new session. To enable your agent +to retain conversational context across multiple sessions, include a memoryConfiguration +object. For more information, see Configure memory. To override the default prompt +behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. If you agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot. @@ -109,6 +111,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys expires and Amazon Bedrock deletes any data provided before the timeout. - `"instruction"`: Instructions that tell the agent what it should do and how it should interact with users. +- `"memoryConfiguration"`: Contains the details of the memory configured for the agent. - `"promptOverrideConfiguration"`: Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. - `"tags"`: Any tags that you want to attach to the agent. 
@@ -152,11 +155,13 @@ Creates an action group for an agent. An action group represents the actions tha can carry out for the customer by defining the APIs that an agent can call and the logic for calling them. To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field -set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor -fields blank for this action group. During orchestration, if your agent determines that it -needs to invoke an API in an action group, but doesn't have enough information to complete -the API request, it will invoke this action group instead and return an Observation -reprompting the user for more information. +set to AMAZON.UserInput. To allow your agent to generate, run, and troubleshoot code when +trying to complete a task, add an action group with the parentActionGroupSignature field +set to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and +actionGroupExecutor fields blank for this action group. During orchestration, if your agent +determines that it needs to invoke an API in an action group, but doesn't have enough +information to complete the API request, it will invoke this action group instead and +return an Observation reprompting the user for more information. # Arguments - `action_group_name`: The name to give the action group. @@ -183,10 +188,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"parentActionGroupSignature"`: To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action - group. 
During orchestration, if your agent determines that it needs to invoke an API in an - action group, but doesn't have enough information to complete the API request, it will - invoke this action group instead and return an Observation reprompting the user for more - information. + group. To allow your agent to generate, run, and troubleshoot code when trying to complete + a task, set this field to AMAZON.CodeInterpreter. You must leave the description, + apiSchema, and actionGroupExecutor fields blank for this action group. During + orchestration, if your agent determines that it needs to invoke an API in an action group, + but doesn't have enough information to complete the API request, it will invoke this action + group instead and return an Observation reprompting the user for more information. """ function create_agent_action_group( actionGroupName, @@ -288,11 +295,11 @@ end create_data_source(data_source_configuration, knowledge_base_id, name) create_data_source(data_source_configuration, knowledge_base_id, name, params::Dict{String,<:Any}) -Sets up a data source to be added to a knowledge base. You can't change the -chunkingConfiguration after you create the data source. +Creates a data source connector for a knowledge base. You can't change the +chunkingConfiguration after you create the data source connector. # Arguments -- `data_source_configuration`: Contains metadata about where the data source is stored. +- `data_source_configuration`: The connection configuration for the data source. - `knowledge_base_id`: The unique identifier of the knowledge base to which to add the data source. - `name`: The name of the data source. @@ -303,7 +310,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. 
-- `"dataDeletionPolicy"`: The data deletion policy assigned to the data source. +- `"dataDeletionPolicy"`: The data deletion policy for the data source. You can set the + data deletion policy to: DELETE: Deletes all underlying data belonging to the data source + from the vector store upon deletion of a knowledge base or data source resource. Note that + the vector store itself is not deleted, only the underlying data. This flag is ignored if + an Amazon Web Services account is deleted. RETAIN: Retains all underlying data in your + vector store upon deletion of a knowledge base or data source resource. - `"description"`: A description of the data source. - `"serverSideEncryptionConfiguration"`: Contains details about the server-side encryption for the data source. @@ -354,6 +366,185 @@ function create_data_source( ) end +""" + create_flow(execution_role_arn, name) + create_flow(execution_role_arn, name, params::Dict{String,<:Any}) + +Creates a prompt flow that you can use to send an input through various steps to yield an +output. Configure nodes, each of which corresponds to a step of the flow, and create +connections between the nodes to create paths to different outputs. For more information, +see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `execution_role_arn`: The Amazon Resource Name (ARN) of the service role with permissions + to create and manage a flow. For more information, see Create a service role for flows in + Amazon Bedrock in the Amazon Bedrock User Guide. +- `name`: A name for the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. 
+- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the flow. +- `"definition"`: A definition of the nodes and connections between nodes in the flow. +- `"description"`: A description for the flow. +- `"tags"`: Any tags that you want to attach to the flow. For more information, see Tagging + resources in Amazon Bedrock. +""" +function create_flow( + executionRoleArn, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/flows/", + Dict{String,Any}( + "executionRoleArn" => executionRoleArn, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow( + executionRoleArn, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "executionRoleArn" => executionRoleArn, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_flow_alias(flow_identifier, name, routing_configuration) + create_flow_alias(flow_identifier, name, routing_configuration, params::Dict{String,<:Any}) + +Creates an alias of a flow for deployment. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which to create an alias. +- `name`: A name for the alias. +- `routing_configuration`: Contains information about the version to which to map the alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. 
If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description for the alias. +- `"tags"`: Any tags that you want to attach to the alias of the flow. For more + information, see Tagging resources in Amazon Bedrock. +""" +function create_flow_alias( + flowIdentifier, + name, + routingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/aliases", + Dict{String,Any}( + "name" => name, + "routingConfiguration" => routingConfiguration, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow_alias( + flowIdentifier, + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/aliases", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "routingConfiguration" => routingConfiguration, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_flow_version(flow_identifier) + create_flow_version(flow_identifier, params::Dict{String,<:Any}) + +Creates a version of the flow that you can deploy. For more information, see Deploy a flow +in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow that you want to create a version of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. 
For more information, see Ensuring + idempotency. +- `"description"`: A description of the version of the flow. +""" +function create_flow_version( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/versions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow_version( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/versions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration) create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration, params::Dict{String,<:Any}) @@ -445,6 +636,108 @@ function create_knowledge_base( ) end +""" + create_prompt(name) + create_prompt(name, params::Dict{String,<:Any}) + +Creates a prompt in your prompt library that you can add to a flow. For more information, +see Prompt management in Amazon Bedrock, Create a prompt using Prompt management and Prompt +flows in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `name`: A name for the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the prompt. 
+- `"defaultVariant"`: The name of the default variant for the prompt. This value must match + the name field in the relevant PromptVariant object. +- `"description"`: A description for the prompt. +- `"tags"`: Any tags that you want to attach to the prompt. For more information, see + Tagging resources in Amazon Bedrock. +- `"variants"`: A list of objects, each containing details about a variant of the prompt. +""" +function create_prompt(name; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/prompts/", + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_prompt( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/prompts/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_prompt_version(prompt_identifier) + create_prompt_version(prompt_identifier, params::Dict{String,<:Any}) + +Creates a static snapshot of your prompt that can be deployed to production. For more +information, see Deploy prompts using Prompt management by creating versions in the Amazon +Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt that you want to create a + version of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description for the version of the prompt. 
+- `"tags"`: Any tags that you want to attach to the version of the prompt. For more + information, see Tagging resources in Amazon Bedrock. +""" +function create_prompt_version( + promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/prompts/$(promptIdentifier)/versions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_prompt_version( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/prompts/$(promptIdentifier)/versions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_agent(agent_id) delete_agent(agent_id, params::Dict{String,<:Any}) @@ -637,6 +930,120 @@ function delete_data_source( ) end +""" + delete_flow(flow_identifier) + delete_flow(flow_identifier, params::Dict{String,<:Any}) + +Deletes a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. 
+""" +function delete_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_flow( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_flow_alias(alias_identifier, flow_identifier) + delete_flow_alias(alias_identifier, flow_identifier, params::Dict{String,<:Any}) + +Deletes an alias of a flow. + +# Arguments +- `alias_identifier`: The unique identifier of the alias to be deleted. +- `flow_identifier`: The unique identifier of the flow that the alias belongs to. + +""" +function delete_flow_alias( + aliasIdentifier, flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_flow_alias( + aliasIdentifier, + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_flow_version(flow_identifier, flow_version) + delete_flow_version(flow_identifier, flow_version, params::Dict{String,<:Any}) + +Deletes a version of a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow whose version that you want to delete +- `flow_version`: The version of the flow that you want to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. +""" +function delete_flow_version( + flowIdentifier, flowVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_flow_version( + flowIdentifier, + flowVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_knowledge_base(knowledge_base_id) delete_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) @@ -673,6 +1080,43 @@ function delete_knowledge_base( ) end +""" + delete_prompt(prompt_identifier) + delete_prompt(prompt_identifier, params::Dict{String,<:Any}) + +Deletes a prompt or a prompt version from the Prompt management tool. For more information, +see Delete prompts from the Prompt management tool and Delete a version of a prompt from +the Prompt management tool in the Amazon Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"promptVersion"`: The version of the prompt to delete. 
+""" +function delete_prompt(promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "DELETE", + "/prompts/$(promptIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_prompt( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/prompts/$(promptIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id) disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id, params::Dict{String,<:Any}) @@ -926,7 +1370,114 @@ function get_data_source( ) return bedrock_agent( "GET", - "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_flow(flow_identifier) + get_flow(flow_identifier, params::Dict{String,<:Any}) + +Retrieves information about a flow. For more information, see Manage a flow in Amazon +Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +""" +function get_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_flow_alias(alias_identifier, flow_identifier) + get_flow_alias(alias_identifier, flow_identifier, params::Dict{String,<:Any}) + +Retrieves information about a flow. 
For more information, see Deploy a flow in Amazon +Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `alias_identifier`: The unique identifier of the alias for which to retrieve information. +- `flow_identifier`: The unique identifier of the flow that the alias belongs to. + +""" +function get_flow_alias( + aliasIdentifier, flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow_alias( + aliasIdentifier, + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_flow_version(flow_identifier, flow_version) + get_flow_version(flow_identifier, flow_version, params::Dict{String,<:Any}) + +Retrieves information about a version of a flow. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which to get information. +- `flow_version`: The version of the flow for which to get information. 
+ +""" +function get_flow_version( + flowIdentifier, flowVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow_version( + flowIdentifier, + flowVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1010,6 +1561,43 @@ function get_knowledge_base( ) end +""" + get_prompt(prompt_identifier) + get_prompt(prompt_identifier, params::Dict{String,<:Any}) + +Retrieves information about a prompt or a version of it. For more information, see View +information about prompts using Prompt management and View information about a version of +your prompt in the Amazon Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"promptVersion"`: The version of the prompt about which you want to retrieve information. 
+""" +function get_prompt(promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", + "/prompts/$(promptIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_prompt( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/prompts/$(promptIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_agent_action_groups(agent_id, agent_version) list_agent_action_groups(agent_id, agent_version, params::Dict{String,<:Any}) @@ -1247,6 +1835,120 @@ function list_data_sources( ) end +""" + list_flow_aliases(flow_identifier) + list_flow_aliases(flow_identifier, params::Dict{String,<:Any}) + +Returns a list of aliases for a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which aliases are being returned. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. 
+""" +function list_flow_aliases( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_flow_aliases( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_flow_versions(flow_identifier) + list_flow_versions(flow_identifier, params::Dict{String,<:Any}) + +Returns a list of information about each flow. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. 
+""" +function list_flow_versions( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_flow_versions( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_flows() + list_flows(params::Dict{String,<:Any}) + +Returns a list of flows and information about each flow. For more information, see Manage a +flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. 
+""" +function list_flows(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", "/flows/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_flows( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", "/flows/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_ingestion_jobs(data_source_id, knowledge_base_id) list_ingestion_jobs(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1327,6 +2029,37 @@ function list_knowledge_bases( ) end +""" + list_prompts() + list_prompts(params::Dict{String,<:Any}) + +Returns a list of prompts from the Prompt management tool and information about each +prompt. For more information, see View information about prompts using Prompt management in +the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +- `"promptIdentifier"`: The unique identifier of the prompt. 
+""" +function list_prompts(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", "/prompts/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_prompts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", "/prompts/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1391,6 +2124,39 @@ function prepare_agent( ) end +""" + prepare_flow(flow_identifier) + prepare_flow(flow_identifier, params::Dict{String,<:Any}) + +Prepares the DRAFT version of a flow so that it can be invoked. For more information, see +Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +""" +function prepare_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function prepare_flow( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_ingestion_job(data_source_id, knowledge_base_id) start_ingestion_job(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1539,6 +2305,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys expires and Amazon Bedrock deletes any data provided before the timeout. - `"instruction"`: Specifies new instructions that tell the agent what it should do and how it should interact with users. +- `"memoryConfiguration"`: Specifies the new memory configuration for the agent. 
- `"promptOverrideConfiguration"`: Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. """ @@ -1756,20 +2523,21 @@ end update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name) update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name, params::Dict{String,<:Any}) -Updates configurations for a data source. You can't change the chunkingConfiguration after -you create the data source. Specify the existing chunkingConfiguration. +Updates the configurations for a data source connector. You can't change the +chunkingConfiguration after you create the data source connector. Specify the existing +chunkingConfiguration. # Arguments -- `data_source_configuration`: Contains details about the storage configuration of the data - source. +- `data_source_configuration`: The connection configuration for the data source that you + want to update. - `data_source_id`: The unique identifier of the data source. -- `knowledge_base_id`: The unique identifier of the knowledge base to which the data source - belongs. +- `knowledge_base_id`: The unique identifier of the knowledge base for the data source. - `name`: Specifies a new name for the data source. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"dataDeletionPolicy"`: The data deletion policy of the updated data source. +- `"dataDeletionPolicy"`: The data deletion policy for the data source that you want to + update. - `"description"`: Specifies a new description for the data source. - `"serverSideEncryptionConfiguration"`: Contains details about server-side encryption of the data source. @@ -1818,6 +2586,123 @@ function update_data_source( ) end +""" + update_flow(execution_role_arn, flow_identifier, name) + update_flow(execution_role_arn, flow_identifier, name, params::Dict{String,<:Any}) + +Modifies a flow. 
Include both fields that you want to keep and fields that you want to +change. For more information, see How it works and Create a flow in Amazon Bedrock in the +Amazon Bedrock User Guide. + +# Arguments +- `execution_role_arn`: The Amazon Resource Name (ARN) of the service role with permissions + to create and manage a flow. For more information, see Create a service role for flows in + Amazon Bedrock in the Amazon Bedrock User Guide. +- `flow_identifier`: The unique identifier of the flow. +- `name`: A name for the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the flow. +- `"definition"`: A definition of the nodes and the connections between the nodes in the + flow. +- `"description"`: A description for the flow. +""" +function update_flow( + executionRoleArn, + flowIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/", + Dict{String,Any}("executionRoleArn" => executionRoleArn, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_flow( + executionRoleArn, + flowIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("executionRoleArn" => executionRoleArn, "name" => name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_flow_alias(alias_identifier, flow_identifier, name, routing_configuration) + update_flow_alias(alias_identifier, flow_identifier, name, routing_configuration, params::Dict{String,<:Any}) + +Modifies the alias of a flow. Include both fields that you want to keep and ones that you +want to change. 
For more information, see Deploy a flow in Amazon Bedrock in the Amazon +Bedrock User Guide. + +# Arguments +- `alias_identifier`: The unique identifier of the alias. +- `flow_identifier`: The unique identifier of the flow. +- `name`: The name of the flow alias. +- `routing_configuration`: Contains information about the version to which to map the alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the flow alias. +""" +function update_flow_alias( + aliasIdentifier, + flowIdentifier, + name, + routingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + Dict{String,Any}("name" => name, "routingConfiguration" => routingConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_flow_alias( + aliasIdentifier, + flowIdentifier, + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "routingConfiguration" => routingConfiguration + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration) update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration, params::Dict{String,<:Any}) @@ -1893,3 +2778,50 @@ function update_knowledge_base( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_prompt(name, prompt_identifier) + update_prompt(name, prompt_identifier, params::Dict{String,<:Any}) + +Modifies a prompt in your prompt library. 
Include both fields that you want to keep and +fields that you want to replace. For more information, see Prompt management in Amazon +Bedrock and Edit prompts in your prompt library in the Amazon Bedrock User Guide. + +# Arguments +- `name`: A name for the prompt. +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the prompt. +- `"defaultVariant"`: The name of the default variant for the prompt. This value must match + the name field in the relevant PromptVariant object. +- `"description"`: A description for the prompt. +- `"variants"`: A list of objects, each containing details about a variant of the prompt. +""" +function update_prompt( + name, promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "PUT", + "/prompts/$(promptIdentifier)/", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_prompt( + name, + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/prompts/$(promptIdentifier)/", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock_agent_runtime.jl b/src/services/bedrock_agent_runtime.jl index 1468f14ec2..ad3ae6d600 100644 --- a/src/services/bedrock_agent_runtime.jl +++ b/src/services/bedrock_agent_runtime.jl @@ -4,24 +4,123 @@ using AWS.AWSServices: bedrock_agent_runtime using AWS.Compat using AWS.UUIDs +""" + delete_agent_memory(agent_alias_id, agent_id) + delete_agent_memory(agent_alias_id, agent_id, params::Dict{String,<:Any}) + +Deletes memory from the specified memory identifier. 
+ +# Arguments +- `agent_alias_id`: The unique identifier of an alias of an agent. +- `agent_id`: The unique identifier of the agent to which the alias belongs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"memoryId"`: The unique identifier of the memory. +""" +function delete_agent_memory( + agentAliasId, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent_runtime( + "DELETE", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent_memory( + agentAliasId, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "DELETE", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_memory(agent_alias_id, agent_id, memory_id, memory_type) + get_agent_memory(agent_alias_id, agent_id, memory_id, memory_type, params::Dict{String,<:Any}) + +Gets the sessions stored in the memory of the agent. + +# Arguments +- `agent_alias_id`: The unique identifier of an alias of an agent. +- `agent_id`: The unique identifier of the agent to which the alias belongs. +- `memory_id`: The unique identifier of the memory. +- `memory_type`: The type of memory. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxItems"`: The maximum number of items to return in the response. If the total number + of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. 
+- `"nextToken"`: If the total number of results is greater than the maxItems value provided + in the request, enter the token returned in the nextToken field in the response in this + field to return the next batch of results. +""" +function get_agent_memory( + agentAliasId, + agentId, + memoryId, + memoryType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "GET", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + Dict{String,Any}("memoryId" => memoryId, "memoryType" => memoryType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_memory( + agentAliasId, + agentId, + memoryId, + memoryType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "GET", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("memoryId" => memoryId, "memoryType" => memoryType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ invoke_agent(agent_alias_id, agent_id, session_id) invoke_agent(agent_alias_id, agent_id, session_id, params::Dict{String,<:Any}) - The CLI doesn't support InvokeAgent. Sends a prompt for the agent to process and respond -to. Note the following fields for the request: To continue the same conversation with an -agent, use the same sessionId value in the request. To activate trace enablement, turn -enableTrace to true. Trace enablement helps you follow the agent's reasoning process that -led it to the information it processed, the actions it took, and the final result it -yielded. For more information, see Trace enablement. End a conversation by setting -endSession to true. In the sessionState object, you can include attributes for the -session or prompt or, if you configured an action group to return control, results from -invocation of the action group. 
The response is returned in the bytes field of the chunk -object. The attribution object contains citations for parts of the response. If you set -enableTrace to true in the request, you can trace the agent's steps and reasoning process -that led it to the response. If the action predicted was configured to return control, -the response returns parameters for the action, elicited from the user, in the -returnControl field. Errors are also surfaced in the response. + The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. +Sends a prompt for the agent to process and respond to. Note the following fields for the +request: To continue the same conversation with an agent, use the same sessionId value in +the request. To activate trace enablement, turn enableTrace to true. Trace enablement +helps you follow the agent's reasoning process that led it to the information it processed, +the actions it took, and the final result it yielded. For more information, see Trace +enablement. End a conversation by setting endSession to true. In the sessionState +object, you can include attributes for the session or prompt or, if you configured an +action group to return control, results from invocation of the action group. The response +is returned in the bytes field of the chunk object. The attribution object contains +citations for parts of the response. If you set enableTrace to true in the request, you +can trace the agent's steps and reasoning process that led it to the response. If the +action predicted was configured to return control, the response returns parameters for the +action, elicited from the user, in the returnControl field. Errors are also surfaced in +the response. # Arguments - `agent_alias_id`: The alias of the agent to use. @@ -37,6 +136,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"inputText"`: The prompt text to send the agent. 
If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. +- `"memoryId"`: The unique identifier of the agent memory. - `"sessionState"`: Contains parameters that specify various attributes of the session. For more information, see Control session context. If you include returnControlInvocationResults in the sessionState field, the inputText field will be @@ -68,6 +168,50 @@ function invoke_agent( ) end +""" + invoke_flow(flow_alias_identifier, flow_identifier, inputs) + invoke_flow(flow_alias_identifier, flow_identifier, inputs, params::Dict{String,<:Any}) + +Invokes an alias of a flow to run the inputs that you specify and return the output of each +node as a stream. If there's an error, the error is returned. For more information, see +Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_alias_identifier`: The unique identifier of the flow alias. +- `flow_identifier`: The unique identifier of the flow. +- `inputs`: A list of objects, each containing information about an input into the flow. 
+ +""" +function invoke_flow( + flowAliasIdentifier, + flowIdentifier, + inputs; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/flows/$(flowIdentifier)/aliases/$(flowAliasIdentifier)", + Dict{String,Any}("inputs" => inputs); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_flow( + flowAliasIdentifier, + flowIdentifier, + inputs, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/flows/$(flowIdentifier)/aliases/$(flowAliasIdentifier)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("inputs" => inputs), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retrieve(knowledge_base_id, retrieval_query) retrieve(knowledge_base_id, retrieval_query, params::Dict{String,<:Any}) @@ -129,8 +273,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"retrieveAndGenerateConfiguration"`: Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations. - `"sessionConfiguration"`: Contains details about the session with the knowledge base. -- `"sessionId"`: The unique identifier of the session. Reuse the same value to continue the - same session with the knowledge base. +- `"sessionId"`: The unique identifier of the session. When you first make a + RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must + reuse this value for all subsequent requests in the same conversational session. This value + allows Amazon Bedrock to maintain context and knowledge from previous interactions. You + can't explicitly set the sessionId yourself. 
""" function retrieve_and_generate(input; aws_config::AbstractAWSConfig=global_aws_config()) return bedrock_agent_runtime( diff --git a/src/services/bedrock_runtime.jl b/src/services/bedrock_runtime.jl index 3f4cc4d7c6..37265ef0bd 100644 --- a/src/services/bedrock_runtime.jl +++ b/src/services/bedrock_runtime.jl @@ -4,19 +4,70 @@ using AWS.AWSServices: bedrock_runtime using AWS.Compat using AWS.UUIDs +""" + apply_guardrail(content, guardrail_identifier, guardrail_version, source) + apply_guardrail(content, guardrail_identifier, guardrail_version, source, params::Dict{String,<:Any}) + +The action to apply a guardrail. + +# Arguments +- `content`: The content details used in the request to apply the guardrail. +- `guardrail_identifier`: The guardrail identifier used in the request to apply the + guardrail. +- `guardrail_version`: The guardrail version used in the request to apply the guardrail. +- `source`: The source of data used in the request to apply the guardrail. + +""" +function apply_guardrail( + content, + guardrailIdentifier, + guardrailVersion, + source; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/guardrail/$(guardrailIdentifier)/version/$(guardrailVersion)/apply", + Dict{String,Any}("content" => content, "source" => source); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function apply_guardrail( + content, + guardrailIdentifier, + guardrailVersion, + source, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/guardrail/$(guardrailIdentifier)/version/$(guardrailVersion)/apply", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("content" => content, "source" => source), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ converse(messages, model_id) converse(messages, model_id, params::Dict{String,<:Any}) Sends messages to the specified Amazon Bedrock model. 
Converse provides a consistent interface that works with all models that support messages. This allows you to write code -once and use it with different models. Should a model have unique inference parameters, you -can also pass those unique parameters to the model. For information about the Converse API, -see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a -guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a -model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, -see Converse API examples in the Amazon Bedrock User Guide. This operation requires -permission for the bedrock:InvokeModel action. +once and use it with different models. If a model has unique inference parameters, you can +also pass those unique parameters to the model. Amazon Bedrock doesn't store any text, +images, or documents that you provide as content. The data is only used to generate the +response. For information about the Converse API, see Use the Converse API in the Amazon +Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the +Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in +the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon +Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. # Arguments - `messages`: The messages that you want to send to the model. @@ -88,12 +139,15 @@ ConverseStream provides a consistent API that works with all Amazon Bedrock mode support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. To find out if a model supports streaming, call GetFoundationModel and check -the responseStreamingSupported field in the response. 
For information about the Converse -API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use -a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a -model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, -see Conversation streaming example in the Amazon Bedrock User Guide. This operation -requires permission for the bedrock:InvokeModelWithResponseStream action. +the responseStreamingSupported field in the response. The CLI doesn't support streaming +operations in Amazon Bedrock, including ConverseStream. Amazon Bedrock doesn't store any +text, images, or documents that you provide as content. The data is only used to generate +the response. For information about the Converse API, see Use the Converse API in the +Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in +the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) +in the Amazon Bedrock User Guide For example code, see Conversation streaming example in +the Amazon Bedrock User Guide. This operation requires permission for the +bedrock:InvokeModelWithResponseStream action. # Arguments - `messages`: The messages that you want to send to the model. @@ -229,9 +283,10 @@ end Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream. To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported -field in the response. The CLI doesn't support InvokeModelWithResponseStream. For example -code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This -operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action. +field in the response. The CLI doesn't support streaming operations in Amazon Bedrock, +including InvokeModelWithResponseStream. 
For example code, see Invoke model with streaming +code example in the Amazon Bedrock User Guide. This operation requires permissions to +perform the bedrock:InvokeModelWithResponseStream action. # Arguments - `body`: The prompt and inference parameters in the format specified in the contentType in diff --git a/src/services/chime_sdk_media_pipelines.jl b/src/services/chime_sdk_media_pipelines.jl index 3c9ec4e109..39777491d0 100644 --- a/src/services/chime_sdk_media_pipelines.jl +++ b/src/services/chime_sdk_media_pipelines.jl @@ -320,16 +320,26 @@ end create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration) create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration, params::Dict{String,<:Any}) -Creates an Kinesis video stream pool for the media pipeline. +Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines. If a +meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same +Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be +in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS +stream can be in any available Region, including an opt-in Region. For example, if the +meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or +any other Region that the Amazon Chime SDK supports. To learn which AWS Region a meeting +uses, call the GetMeeting API and use the MediaRegion parameter from the response. For more +information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK +Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account +Management Reference Guide. # Arguments -- `pool_name`: The name of the video stream pool. -- `stream_configuration`: The configuration settings for the video stream. +- `pool_name`: The name of the pool. +- `stream_configuration`: The configuration settings for the stream. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: The token assigned to the client making the request. -- `"Tags"`: The tags assigned to the video stream pool. +- `"Tags"`: The tags assigned to the stream pool. """ function create_media_pipeline_kinesis_video_stream_pool( PoolName, StreamConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -531,10 +541,11 @@ end delete_media_pipeline_kinesis_video_stream_pool(identifier) delete_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Deletes an Kinesis video stream pool. +Deletes an Amazon Kinesis Video Stream pool. # Arguments -- `identifier`: The ID of the pool being deleted. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function delete_media_pipeline_kinesis_video_stream_pool( @@ -671,7 +682,8 @@ end Gets an Kinesis video stream pool. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function get_media_pipeline_kinesis_video_stream_pool( @@ -1314,10 +1326,11 @@ end update_media_pipeline_kinesis_video_stream_pool(identifier) update_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Updates an Kinesis video stream pool in a media pipeline. +Updates an Amazon Kinesis Video Stream pool in a media pipeline. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: diff --git a/src/services/cleanrooms.jl b/src/services/cleanrooms.jl index 0d2c9cc8f5..175829ac3d 100644 --- a/src/services/cleanrooms.jl +++ b/src/services/cleanrooms.jl @@ -440,7 +440,8 @@ Creates a new analysis rule for a configured table. Currently, only one analysis be created for a given configured table. # Arguments -- `analysis_rule_policy`: The entire created configured table analysis rule object. +- `analysis_rule_policy`: The analysis rule policy that was created for the configured + table. - `analysis_rule_type`: The type of analysis rule. - `configured_table_identifier`: The identifier for the configured table to create the analysis rule for. Currently accepts the configured table ID. @@ -560,6 +561,187 @@ function create_configured_table_association( ) end +""" + create_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier) + create_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Creates a new analysis rule for an associated configured table. + +# Arguments +- `analysis_rule_policy`: The analysis rule policy that was created for the configured + table association. +- `analysis_rule_type`: The type of analysis rule. +- `configured_table_association_identifier`: The unique ID for the configured table + association. Currently accepts the configured table association ID. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. 
+ +""" +function create_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule", + Dict{String,Any}( + "analysisRulePolicy" => analysisRulePolicy, + "analysisRuleType" => analysisRuleType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "analysisRulePolicy" => analysisRulePolicy, + "analysisRuleType" => analysisRuleType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_mapping_table(input_reference_config, membership_identifier, name) + create_id_mapping_table(input_reference_config, membership_identifier, name, params::Dict{String,<:Any}) + +Creates an ID mapping table. + +# Arguments +- `input_reference_config`: The input reference configuration needed to create the ID + mapping table. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table. +- `name`: A name for the ID mapping table. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the ID mapping table. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. 
This + value is used to encrypt the mapping table data that is stored by Clean Rooms. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. +""" +function create_id_mapping_table( + inputReferenceConfig, + membershipIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables", + Dict{String,Any}("inputReferenceConfig" => inputReferenceConfig, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_mapping_table( + inputReferenceConfig, + membershipIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputReferenceConfig" => inputReferenceConfig, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_namespace_association(input_reference_config, membership_identifier, name) + create_id_namespace_association(input_reference_config, membership_identifier, name, params::Dict{String,<:Any}) + +Creates an ID namespace association. + +# Arguments +- `input_reference_config`: The input reference configuration needed to create the ID + namespace association. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association. +- `name`: The name for the ID namespace association. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the ID namespace association. 
+- `"idMappingConfig"`: The configuration settings for the ID mapping table. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. +""" +function create_id_namespace_association( + inputReferenceConfig, + membershipIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + Dict{String,Any}("inputReferenceConfig" => inputReferenceConfig, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_namespace_association( + inputReferenceConfig, + membershipIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputReferenceConfig" => inputReferenceConfig, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_membership(collaboration_identifier, query_log_status) create_membership(collaboration_identifier, query_log_status, params::Dict{String,<:Any}) @@ -924,6 +1106,129 @@ function delete_configured_table_association( ) end +""" + delete_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier) + delete_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an analysis rule for a configured table association. + +# Arguments +- `analysis_rule_type`: The type of the analysis rule that you want to delete. 
+- `configured_table_association_identifier`: The identifier for the configured table + association that's related to the analysis rule that you want to delete. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function delete_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_mapping_table(id_mapping_table_identifier, membership_identifier) + delete_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to delete. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to delete. 
+ +""" +function delete_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_namespace_association(id_namespace_association_identifier, membership_identifier) + delete_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an ID namespace association. + +# Arguments +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to delete. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to delete. 
+ +""" +function delete_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_member(account_id, collaboration_identifier) delete_member(account_id, collaboration_identifier, params::Dict{String,<:Any}) @@ -1189,6 +1494,46 @@ function get_collaboration_configured_audience_model_association( ) end +""" + get_collaboration_id_namespace_association(collaboration_identifier, id_namespace_association_identifier) + get_collaboration_id_namespace_association(collaboration_identifier, id_namespace_association_identifier, params::Dict{String,<:Any}) + +Retrieves an ID namespace association from a specific collaboration. + +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + ID namespace association that you want to retrieve. +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to retrieve. 
+ +""" +function get_collaboration_id_namespace_association( + collaborationIdentifier, + idNamespaceAssociationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_collaboration_id_namespace_association( + collaborationIdentifier, + idNamespaceAssociationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier) get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier, params::Dict{String,<:Any}) @@ -1283,19 +1628,142 @@ function get_configured_table( ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)"; + "/configuredTables/$(configuredTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table( + configuredTableIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier) + get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier, params::Dict{String,<:Any}) + +Retrieves a configured table analysis rule. + +# Arguments +- `analysis_rule_type`: The analysis rule to be retrieved. 
Configured table analysis rules + are uniquely identified by their configured table identifier and analysis rule type. +- `configured_table_identifier`: The unique identifier for the configured table to + retrieve. Currently accepts the configured table ID. + +""" +function get_configured_table_analysis_rule( + analysisRuleType, + configuredTableIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table_analysis_rule( + analysisRuleType, + configuredTableIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_association(configured_table_association_identifier, membership_identifier) + get_configured_table_association(configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Retrieves a configured table association. + +# Arguments +- `configured_table_association_identifier`: The unique ID for the configured table + association to retrieve. Currently accepts the configured table ID. +- `membership_identifier`: A unique identifier for the membership that the configured table + association belongs to. Currently accepts the membership ID. 
+ +""" +function get_configured_table_association( + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table_association( + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier) + get_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Retrieves the analysis rule for a configured table association. + +# Arguments +- `analysis_rule_type`: The type of analysis rule that you want to retrieve. +- `configured_table_association_identifier`: The identifier for the configured table + association that's related to the analysis rule. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. 
+ +""" +function get_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table( - configuredTableIdentifier, +function get_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1303,39 +1771,39 @@ function get_configured_table( end """ - get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier) - get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier, params::Dict{String,<:Any}) + get_id_mapping_table(id_mapping_table_identifier, membership_identifier) + get_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) -Retrieves a configured table analysis rule. +Retrieves an ID mapping table. # Arguments -- `analysis_rule_type`: The analysis rule to be retrieved. Configured table analysis rules - are uniquely identified by their configured table identifier and analysis rule type. -- `configured_table_identifier`: The unique identifier for the configured table to - retrieve. Currently accepts the configured table ID. +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table identifier + that you want to retrieve. 
+- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to retrieve. """ -function get_configured_table_analysis_rule( - analysisRuleType, - configuredTableIdentifier; +function get_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)"; + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table_analysis_rule( - analysisRuleType, - configuredTableIdentifier, +function get_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1343,39 +1811,39 @@ function get_configured_table_analysis_rule( end """ - get_configured_table_association(configured_table_association_identifier, membership_identifier) - get_configured_table_association(configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + get_id_namespace_association(id_namespace_association_identifier, membership_identifier) + get_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) -Retrieves a configured table association. +Retrieves an ID namespace association. # Arguments -- `configured_table_association_identifier`: The unique ID for the configured table - association to retrieve. Currently accepts the configured table ID. 
-- `membership_identifier`: A unique identifier for the membership that the configured table - association belongs to. Currently accepts the membership ID. +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to retrieve. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to retrieve. """ -function get_configured_table_association( - configuredTableAssociationIdentifier, +function get_id_namespace_association( + idNamespaceAssociationIdentifier, membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)"; + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table_association( - configuredTableAssociationIdentifier, +function get_id_namespace_association( + idNamespaceAssociationIdentifier, membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1691,6 +2159,47 @@ function list_collaboration_configured_audience_model_associations( ) end +""" + list_collaboration_id_namespace_associations(collaboration_identifier) + list_collaboration_id_namespace_associations(collaboration_identifier, params::Dict{String,<:Any}) + +Returns a list of the ID namespace associations in a collaboration. 
+ +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + ID namespace associations that you want to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met.> +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_collaboration_id_namespace_associations( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_collaboration_id_namespace_associations( + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_collaboration_privacy_budget_templates(collaboration_identifier) list_collaboration_privacy_budget_templates(collaboration_identifier, params::Dict{String,<:Any}) @@ -1927,6 +2436,88 @@ function list_configured_tables( ) end +""" + list_id_mapping_tables(membership_identifier) + list_id_mapping_tables(membership_identifier, params::Dict{String,<:Any}) + +Returns a list of ID mapping tables. + +# Arguments +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping tables that you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. 
Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_id_mapping_tables( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idmappingtables"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_mapping_tables( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idmappingtables", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_id_namespace_associations(membership_identifier) + list_id_namespace_associations(membership_identifier, params::Dict{String,<:Any}) + +Returns a list of ID namespace associations. + +# Arguments +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. 
+""" +function list_id_namespace_associations( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idnamespaceassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_namespace_associations( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_members(collaboration_identifier) list_members(collaboration_identifier, params::Dict{String,<:Any}) @@ -2211,6 +2802,46 @@ function list_tags_for_resource( ) end +""" + populate_id_mapping_table(id_mapping_table_identifier, membership_identifier) + populate_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Defines the information that's necessary to populate an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to populate. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to populate. 
+ +""" +function populate_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)/populate"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function populate_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)/populate", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ preview_privacy_impact(membership_identifier, parameters) preview_privacy_impact(membership_identifier, parameters, params::Dict{String,<:Any}) @@ -2635,6 +3266,147 @@ function update_configured_table_association( ) end +""" + update_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier) + update_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Updates the analysis rule for a configured table association. + +# Arguments +- `analysis_rule_policy`: The updated analysis rule policy for the configured table + association. +- `analysis_rule_type`: The analysis rule type that you want to update. +- `configured_table_association_identifier`: The identifier for the configured table + association to update. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. 
+ +""" +function update_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + Dict{String,Any}("analysisRulePolicy" => analysisRulePolicy); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("analysisRulePolicy" => analysisRulePolicy), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_mapping_table(id_mapping_table_identifier, membership_identifier) + update_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details that are necessary to update an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to update. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the ID mapping table. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. 
+""" +function update_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_namespace_association(id_namespace_association_identifier, membership_identifier) + update_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details that are necessary to update an ID namespace association. + +# Arguments +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to update. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the ID namespace association. +- `"idMappingConfig"`: The configuration settings for the ID mapping table. +- `"name"`: A new name for the ID namespace association. 
+""" +function update_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_membership(membership_identifier) update_membership(membership_identifier, params::Dict{String,<:Any}) diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 21bf5b85ff..1ce2a1b870 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -3692,7 +3692,8 @@ end list_tags_for_resource2020_05_31(resource) list_tags_for_resource2020_05_31(resource, params::Dict{String,<:Any}) -List tags for a CloudFront resource. +List tags for a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. @@ -3778,7 +3779,8 @@ end tag_resource2020_05_31(resource, tags) tag_resource2020_05_31(resource, tags, params::Dict{String,<:Any}) -Add tags to a CloudFront resource. +Add tags to a CloudFront resource. For more information, see Tagging a distribution in the +Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. @@ -3884,7 +3886,8 @@ end untag_resource2020_05_31(resource, tag_keys) untag_resource2020_05_31(resource, tag_keys, params::Dict{String,<:Any}) -Remove tags from a CloudFront resource. 
+Remove tags from a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. diff --git a/src/services/cloudhsm_v2.jl b/src/services/cloudhsm_v2.jl index 55326e9439..6e20bec62a 100644 --- a/src/services/cloudhsm_v2.jl +++ b/src/services/cloudhsm_v2.jl @@ -8,7 +8,8 @@ using AWS.UUIDs copy_backup_to_region(backup_id, destination_region) copy_backup_to_region(backup_id, destination_region, params::Dict{String,<:Any}) -Copy an AWS CloudHSM cluster backup to a different region. +Copy an CloudHSM cluster backup to a different region. Cross-account use: No. You cannot +perform this operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup that will be copied to the destination region. @@ -57,7 +58,9 @@ end create_cluster(hsm_type, subnet_ids) create_cluster(hsm_type, subnet_ids, params::Dict{String,<:Any}) -Creates a new AWS CloudHSM cluster. +Creates a new CloudHSM cluster. Cross-account use: Yes. To perform this operation with an +CloudHSM backup in a different AWS account, specify the full backup ARN in the value of the +SourceBackupId parameter. # Arguments - `hsm_type`: The type of HSM to use in the cluster. The allowed values are hsm1.medium and @@ -71,9 +74,10 @@ Creates a new AWS CloudHSM cluster. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BackupRetentionPolicy"`: A policy that defines how the service retains backups. - `"Mode"`: The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. -- `"SourceBackupId"`: The identifier (ID) of the cluster backup to restore. Use this value - to restore the cluster from a backup instead of creating a new cluster. To find the backup - ID, use DescribeBackups. +- `"SourceBackupId"`: The identifier (ID) or the Amazon Resource Name (ARN) of the cluster + backup to restore. 
Use this value to restore the cluster from a backup instead of creating + a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in + another account, the full ARN must be supplied. - `"TagList"`: Tags to apply to the CloudHSM cluster during creation. """ function create_cluster( @@ -110,7 +114,9 @@ end create_hsm(availability_zone, cluster_id) create_hsm(availability_zone, cluster_id, params::Dict{String,<:Any}) -Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster. +Creates a new hardware security module (HSM) in the specified CloudHSM cluster. +Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a +different Amazon Web Service account. # Arguments - `availability_zone`: The Availability Zone where you are creating the HSM. To find the @@ -160,8 +166,10 @@ end delete_backup(backup_id) delete_backup(backup_id, params::Dict{String,<:Any}) -Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the -DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup. +Deletes a specified CloudHSM backup. A backup can be restored up to 7 days after the +DeleteBackup request is made. For more information on restoring a backup, see +RestoreBackup. Cross-account use: No. You cannot perform this operation on an CloudHSM +backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup to be deleted. To find the ID of a backup, use the @@ -195,9 +203,10 @@ end delete_cluster(cluster_id) delete_cluster(cluster_id, params::Dict{String,<:Any}) -Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must -delete all HSMs in the cluster. To see if the cluster contains any HSMs, use -DescribeClusters. To delete an HSM, use DeleteHsm. +Deletes the specified CloudHSM cluster. Before you can delete a cluster, you must delete +all HSMs in the cluster. 
To see if the cluster contains any HSMs, use DescribeClusters. To +delete an HSM, use DeleteHsm. Cross-account use: No. You cannot perform this operation on +an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are deleting. To find the @@ -234,6 +243,8 @@ end Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters. +Cross-account use: No. You cannot perform this operation on an CloudHSM hsm in a different +Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that contains the HSM that you are @@ -270,15 +281,49 @@ function delete_hsm( ) end +""" + delete_resource_policy() + delete_resource_policy(params::Dict{String,<:Any}) + + Deletes an CloudHSM resource policy. Deleting a resource policy will result in the +resource being unshared and removed from any RAM resource shares. Deleting the resource +policy attached to a backup will not impact any clusters created from that backup. +Cross-account use: No. You cannot perform this operation on an CloudHSM resource in a +different Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource from which the policy will be + removed. 
+""" +function delete_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "DeleteResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function delete_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "DeleteResourcePolicy", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_backups() describe_backups(params::Dict{String,<:Any}) -Gets information about backups of AWS CloudHSM clusters. This is a paginated operation, -which means that each response might contain only a subset of all the backups. When the -response contains only a subset of backups, it includes a NextToken value. Use this value -in a subsequent DescribeBackups request to get more backups. When you receive a response -with no NextToken (or an empty or null value), that means there are no more backups to get. +Gets information about backups of CloudHSM clusters. Lists either the backups you own or +the backups shared with you when the Shared parameter is true. This is a paginated +operation, which means that each response might contain only a subset of all the backups. +When the response contains only a subset of backups, it includes a NextToken value. Use +this value in a subsequent DescribeBackups request to get more backups. When you receive a +response with no NextToken (or an empty or null value), that means there are no more +backups to get. Cross-account use: Yes. Customers can describe backups in other Amazon Web +Services accounts that are shared with them. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -296,6 +341,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys more backups than the number you specify, the response contains a NextToken value. 
- `"NextToken"`: The NextToken value that you received in the previous response. Use this value to get more backups. +- `"Shared"`: Describe backups that are shared with you. By default when using this + option, the command returns backups that have been shared using a standard Resource Access + Manager resource share. In order for a backup that was shared using the PutResourcePolicy + command to be returned, the share must be promoted to a standard resource share using the + RAM PromoteResourceShareCreatedFromPolicy API operation. For more information about sharing + backups, see Working with shared backups in the CloudHSM User Guide. - `"SortAscending"`: Designates whether or not to sort the return backups by ascending chronological order of generation. """ @@ -316,11 +367,13 @@ end describe_clusters() describe_clusters(params::Dict{String,<:Any}) -Gets information about AWS CloudHSM clusters. This is a paginated operation, which means -that each response might contain only a subset of all the clusters. When the response -contains only a subset of clusters, it includes a NextToken value. Use this value in a -subsequent DescribeClusters request to get more clusters. When you receive a response with -no NextToken (or an empty or null value), that means there are no more clusters to get. +Gets information about CloudHSM clusters. This is a paginated operation, which means that +each response might contain only a subset of all the clusters. When the response contains +only a subset of clusters, it includes a NextToken value. Use this value in a subsequent +DescribeClusters request to get more clusters. When you receive a response with no +NextToken (or an empty or null value), that means there are no more clusters to get. +Cross-account use: No. You cannot perform this operation on CloudHSM clusters in a +different Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -347,14 +400,40 @@ function describe_clusters( ) end +""" + get_resource_policy() + get_resource_policy(params::Dict{String,<:Any}) + + Retrieves the resource policy document attached to a given resource. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which a policy is attached. +""" +function get_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "GetResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "GetResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ initialize_cluster(cluster_id, signed_cert, trust_anchor) initialize_cluster(cluster_id, signed_cert, trust_anchor, params::Dict{String,<:Any}) -Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing +Claims an CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get -the cluster's CSR, use DescribeClusters. +the cluster's CSR, use DescribeClusters. Cross-account use: No. You cannot perform this +operation on an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are claiming. To find the @@ -412,11 +491,13 @@ end list_tags(resource_id) list_tags(resource_id, params::Dict{String,<:Any}) -Gets a list of tags for the specified AWS CloudHSM cluster. 
This is a paginated operation, +Gets a list of tags for the specified CloudHSM cluster. This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken -(or an empty or null value), that means there are no more tags to get. +(or an empty or null value), that means there are no more tags to get. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are getting. To @@ -456,7 +537,8 @@ end modify_backup_attributes(backup_id, never_expires) modify_backup_attributes(backup_id, never_expires, params::Dict{String,<:Any}) -Modifies attributes for AWS CloudHSM backup. +Modifies attributes for CloudHSM backup. Cross-account use: No. You cannot perform this +operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The identifier (ID) of the backup to modify. To find the ID of a backup, use @@ -500,7 +582,8 @@ end modify_cluster(backup_retention_policy, cluster_id) modify_cluster(backup_retention_policy, cluster_id, params::Dict{String,<:Any}) -Modifies AWS CloudHSM cluster. +Modifies CloudHSM cluster. Cross-account use: No. You cannot perform this operation on an +CloudHSM cluster in a different Amazon Web Services account. # Arguments - `backup_retention_policy`: A policy that defines how the service retains backups. @@ -543,12 +626,52 @@ function modify_cluster( ) end +""" + put_resource_policy() + put_resource_policy(params::Dict{String,<:Any}) + +Creates or updates an CloudHSM resource policy. A resource policy helps you to define the +IAM entity (for example, an Amazon Web Services account) that can manage your CloudHSM +resources. 
The following resources support CloudHSM resource policies: Backup - The +resource policy allows you to describe the backup and restore a cluster from the backup in +another Amazon Web Services account. In order to share a backup, it must be in a 'READY' +state and you must own it. While you can share a backup using the CloudHSM +PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead. +Using RAM provides multiple benefits as it creates the policy for you, allows multiple +resources to be shared at one time, and increases the discoverability of shared resources. +If you use PutResourcePolicy and want consumers to be able to describe the backups you +share with them, you must promote the backup to a standard RAM Resource Share using the RAM +PromoteResourceShareCreatedFromPolicy API operation. For more information, see Working +with shared backups in the CloudHSM User Guide Cross-account use: No. You cannot perform +this operation on an CloudHSM resource in a different Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Policy"`: The policy you want to associate with a resource. For an example policy, see + Working with shared backups in the CloudHSM User Guide +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which you want to attach a + policy. 
+""" +function put_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "PutResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function put_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "PutResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ restore_backup(backup_id) restore_backup(backup_id, params::Dict{String,<:Any}) -Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For mor -information on deleting a backup, see DeleteBackup. +Restores a specified CloudHSM backup that is in the PENDING_DELETION state. For more +information on deleting a backup, see DeleteBackup. Cross-account use: No. You cannot +perform this operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup to be restored. To find the ID of a backup, use the @@ -582,7 +705,9 @@ end tag_resource(resource_id, tag_list) tag_resource(resource_id, tag_list, params::Dict{String,<:Any}) -Adds or overwrites one or more tags for the specified AWS CloudHSM cluster. +Adds or overwrites one or more tags for the specified CloudHSM cluster. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster that you are tagging. To find @@ -624,7 +749,9 @@ end untag_resource(resource_id, tag_key_list) untag_resource(resource_id, tag_key_list, params::Dict{String,<:Any}) -Removes the specified tag or tags from the specified AWS CloudHSM cluster. +Removes the specified tag or tags from the specified CloudHSM cluster. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. 
# Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are removing. diff --git a/src/services/codepipeline.jl b/src/services/codepipeline.jl index 50afd54dac..3e0c651485 100644 --- a/src/services/codepipeline.jl +++ b/src/services/codepipeline.jl @@ -872,6 +872,76 @@ function list_pipelines( ) end +""" + list_rule_executions(pipeline_name) + list_rule_executions(pipeline_name, params::Dict{String,<:Any}) + +Lists the rule executions that have occurred in a pipeline configured for conditions with +rules. + +# Arguments +- `pipeline_name`: The name of the pipeline for which you want to get execution summary + information. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: Input information used to filter rule execution history. +- `"maxResults"`: The maximum number of results to return in a single call. To retrieve the + remaining results, make another call with the returned nextToken value. Pipeline history is + limited to the most recent 12 months, based on pipeline execution start times. Default + value is 100. +- `"nextToken"`: The token that was returned from the previous ListRuleExecutions call, + which can be used to return the next set of rule executions in the list. 
+""" +function list_rule_executions( + pipelineName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codepipeline( + "ListRuleExecutions", + Dict{String,Any}("pipelineName" => pipelineName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_rule_executions( + pipelineName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "ListRuleExecutions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("pipelineName" => pipelineName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_rule_types() + list_rule_types(params::Dict{String,<:Any}) + +Lists the rules for the condition. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"regionFilter"`: The rule Region to filter on. +- `"ruleOwnerFilter"`: The rule owner to filter on. +""" +function list_rule_types(; aws_config::AbstractAWSConfig=global_aws_config()) + return codepipeline( + "ListRuleTypes"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_rule_types( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codepipeline( + "ListRuleTypes", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -941,6 +1011,66 @@ function list_webhooks( ) end +""" + override_stage_condition(condition_type, pipeline_execution_id, pipeline_name, stage_name) + override_stage_condition(condition_type, pipeline_execution_id, pipeline_name, stage_name, params::Dict{String,<:Any}) + +Used to override a stage condition. + +# Arguments +- `condition_type`: The type of condition to override for the stage, such as entry + conditions, failure conditions, or success conditions. 
+- `pipeline_execution_id`: The ID of the pipeline execution for the override. +- `pipeline_name`: The name of the pipeline with the stage that will override the condition. +- `stage_name`: The name of the stage for the override. + +""" +function override_stage_condition( + conditionType, + pipelineExecutionId, + pipelineName, + stageName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "OverrideStageCondition", + Dict{String,Any}( + "conditionType" => conditionType, + "pipelineExecutionId" => pipelineExecutionId, + "pipelineName" => pipelineName, + "stageName" => stageName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function override_stage_condition( + conditionType, + pipelineExecutionId, + pipelineName, + stageName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "OverrideStageCondition", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "conditionType" => conditionType, + "pipelineExecutionId" => pipelineExecutionId, + "pipelineName" => pipelineName, + "stageName" => stageName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ poll_for_jobs(action_type_id) poll_for_jobs(action_type_id, params::Dict{String,<:Any}) diff --git a/src/services/connect.jl b/src/services/connect.jl index 23a305e19b..92967d277b 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -1048,7 +1048,7 @@ provided in the StartAttachedFileUpload API. # Arguments - `file_id`: The unique identifier of the attached file resource. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. 
@@ -3574,6 +3574,45 @@ function describe_agent_status( ) end +""" + describe_authentication_profile(authentication_profile_id, instance_id) + describe_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Describes the target +authentication profile. + +# Arguments +- `authentication_profile_id`: A unique identifier for the authentication profile. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +""" +function describe_authentication_profile( + AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_authentication_profile( + AuthenticationProfileId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_contact(contact_id, instance_id) describe_contact(contact_id, instance_id, params::Dict{String,<:Any}) @@ -5528,32 +5567,35 @@ definitions in the Amazon Connect Administrator Guide. interval for the retrieval of historical metrics data. The time must be later than the start time timestamp. It cannot be later than the current timestamp. - `filters`: The filters to apply to returned metrics. You can filter on the following - resources: Agents Channels Feature Queues Routing profiles Routing step - expression User hierarchy groups At least one filter must be passed from queues, - routing profiles, agents, or user hierarchy groups. 
To filter by phone number, see Create a - historical metrics report in the Amazon Connect Administrator Guide. Note the following - limits: Filter keys: A maximum of 5 filter keys are supported in a single request. Valid - filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | - AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + resources: Agents Campaigns Channels Feature Queues Routing profiles Routing + step expression User hierarchy groups At least one filter must be passed from queues, + routing profiles, agents, or user hierarchy groups. For metrics for outbound campaigns + analytics, you can also use campaigns to satisfy at least one filter requirement. To filter + by phone number, see Create a historical metrics report in the Amazon Connect Administrator + Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported + in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | + AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | + AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | - FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | - FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | - ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: - A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are - valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 - filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, - and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. - contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. 
It - is available only to contacts analyzed by Contact Lens conversational analytics. - connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue - examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. - ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This - filter is case and order sensitive. JSON string fields must be sorted in ascending order - and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only - valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that - had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did - not have Amazon Q in Connect enabled as part of the flow This filter is available only - for contact record-driven metrics. + DISCONNECT_REASON | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | + FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | + RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | + Q_CONNECT_ENABLED | Filter values: A maximum of 100 filter values are supported in a + single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. + They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 + request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 + filter values, along with 3 channel filters. contact_lens_conversational_analytics is a + valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by + Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and + connect:WebRTC are valid filterValue examples (not exhaustive) for the + contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid + filter key with a filter value up to 3000 length. This filter is case and order sensitive. 
+ JSON string fields must be sorted in ascending order and JSON array order should be kept as + is. Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the + Q_CONNECT_ENABLED filter key. TRUE includes all contacts that had Amazon Q in Connect + enabled as part of the flow. FALSE includes all contacts that did not have Amazon Q in + Connect enabled as part of the flow This filter is available only for contact + record-driven metrics. Campaign ARNs are valid filterValues for the CAMPAIGN filter key. - `metrics`: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. @@ -5603,10 +5645,13 @@ definitions in the Amazon Connect Administrator Guide. Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: - Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: - Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, - Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource - ID, Initiation method, Resource published timestamp UI name: Average flow time + Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for + contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid groupings and filters: + Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME + Unit: Seconds Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in @@ -5655,26 +5700,25 @@ definitions in the Amazon Connect Administrator Guide. AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter - key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: - Cases created CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| - Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: - Contact abandoned CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). 
UI name: Contacts - abandoned in X seconds CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts answered - in X seconds CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD - Valid groupings and filters: Queue, Channel, Routing Profile, Feature, - contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature - is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric - filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, - Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This + metric is available only for contacts analyzed by outbound campaigns analytics. Unit: + Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer + connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for + contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: + Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 + (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: + Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric + is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent + Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any + whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT + (for Greater than). 
UI name: Campaign contacts abandoned after X rate CASES_CREATED + Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: + CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid + metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing + Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts + created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: + Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: @@ -5698,53 +5742,72 @@ definitions in the Amazon Connect Administrator Guide. contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued - (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved - in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Feature, + (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter + any whole number from 1 to 604800 (inclusive), in seconds. 
For Comparison, you must enter + LT (for \"Less than\"). UI name: Contacts removed from queue in X seconds + CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For + ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For + Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved in X + CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Contacts transferred out Feature is a valid filter but not a valid + grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out - Feature is a valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts - transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings - and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: - Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow - type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows - outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: - Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, + and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS + This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: + Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid + groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection + Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE + This metric is available only for contacts analyzed by outbound campaigns analytics, and + with the answering machine detection enabled. 
Unit: Percent Valid metric filter key: + ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: + Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine + Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: + Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: + Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, + Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource + ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED + Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started - MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, + HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound + campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid + groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: + Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, + Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, + Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI + name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time + MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, - Initiation method, Resource published timestamp UI name: Maximum flow time - MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing - Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI - name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: - Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, - Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource - ID, Initiation method, Resource published timestamp UI name: Minimum flow time + Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: - Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: - Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available - PERCENT_FLOWS_OUTCOME 
Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid - groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows - module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome - type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows - outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. - PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens - conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only - for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but + not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: + Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Flows outcome percentage. The + FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is + available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: + Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time + percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, @@ -5762,27 +5825,40 @@ definitions in the Amazon Connect Administrator Guide. Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid - groupings and filters: Queue, RoutingStepExpression UI name: Not available - SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time - SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This - metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | - CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is - not applicable for this metric. 
SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and - filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: - Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time - SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME + Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: + Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following + filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API + connecting time The Negate key in Metric Level Filters is not applicable for this metric. + SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | + Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: + Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). 
UI name: Contacts + abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in + Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), + in seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts + answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow + time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid + metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected - SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: - Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid + SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in - Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings - and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time + Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: + Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and 
filters: + Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, @@ -5808,8 +5884,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE - | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS - | CHANNEL | contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID | + | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | + contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION @@ -5999,7 +6076,20 @@ end Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region -where the Amazon Connect instance was created. +where the Amazon Connect instance was created. Call the DescribePhoneNumber API to verify +the status of a previous ImportPhoneNumber operation. If you plan to claim or import +numbers and then release numbers frequently, contact us for a service quota exception. +Otherwise, it is possible you will be blocked from claiming and releasing any more numbers +until up to 180 days past the oldest number released has expired. 
By default you can +claim or import and then release up to 200% of your maximum number of active phone numbers. +If you claim or import and then release phone numbers using the UI or API during a rolling +180 day cycle that exceeds 200% of your phone number service level quota, you will be +blocked from claiming or importing any more numbers until 180 days past the oldest number +released has expired. For example, if you already have 99 claimed or imported numbers and +a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim +99, and then release 99, you will have exceeded the 200% limit. At that point you are +blocked from claiming any more numbers until you open an Amazon Web Services Support +ticket. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6180,6 +6270,48 @@ function list_approved_origins( ) end +""" + list_authentication_profiles(instance_id) + list_authentication_profiles(instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Provides summary information about +the authentication profiles in a specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_authentication_profiles( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_authentication_profiles( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_bots(instance_id, lex_version) list_bots(instance_id, lex_version, params::Dict{String,<:Any}) @@ -8318,6 +8450,51 @@ function resume_contact_recording( ) end +""" + search_agent_statuses(instance_id) + search_agent_statuses(instance_id, params::Dict{String,<:Any}) + +Searches AgentStatuses in an Amazon Connect instance, with optional filtering. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return agent statuses. +- `"SearchFilter"`: Filters to be applied to search results. 
+""" +function search_agent_statuses( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-agent-statuses", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_agent_statuses( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-agent-statuses", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_available_phone_numbers(phone_number_country_code, phone_number_type) search_available_phone_numbers(phone_number_country_code, phone_number_type, params::Dict{String,<:Any}) @@ -8770,7 +8947,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys previous response in the next request to retrieve the next set of results. - `"ResourceTypes"`: The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource - types. + types. Supported resource types AGENT ROUTING_PROFILE STANDARD_QUEUE + SECURITY_PROFILE OPERATING_HOURS PROMPT CONTACT_FLOW FLOW_MODULE - `"SearchCriteria"`: The search criteria to be used to return tags. """ function search_resource_tags(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -8894,6 +9072,53 @@ function search_security_profiles( ) end +""" + search_user_hierarchy_groups(instance_id) + search_user_hierarchy_groups(instance_id, params::Dict{String,<:Any}) + +Searches UserHierarchyGroups in an Amazon Connect instance, with optional filtering. The +UserHierarchyGroup with \"LevelId\": \"0\" is the foundation for building levels on top of +an instance. It is not user-definable, nor is it visible in the UI. 
+ +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return UserHierarchyGroups. +- `"SearchFilter"`: Filters to be applied to search results. +""" +function search_user_hierarchy_groups( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-user-hierarchy-groups", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_user_hierarchy_groups( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-user-hierarchy-groups", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_users(instance_id) search_users(instance_id, params::Dict{String,<:Any}) @@ -9054,13 +9279,13 @@ end start_attached_file_upload(file_name, file_size_in_bytes, file_use_case_type, instance_id, associated_resource_arn, params::Dict{String,<:Any}) Provides a pre-signed Amazon S3 URL in response for uploading your content. You may only -use this API to upload attachments to a Connect Case. +use this API to upload attachments to an Amazon Connect Case. # Arguments - `file_name`: A case-sensitive name of the attached file being uploaded. - `file_size_in_bytes`: The size of the attached file in bytes. - `file_use_case_type`: The use case for the file. 
-- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. @@ -9141,8 +9366,9 @@ throttling returns a TooManyRequests exception. The quota for concurrent activ exceeded. Active chat throttling returns a LimitExceededException. If you use the ChatDurationInMinutes parameter and receive a 400 error, your account may not support the ability to configure custom chat durations. For more information, contact Amazon Web -Services Support. For more information about chat, see Chat in the Amazon Connect -Administrator Guide. +Services Support. For more information about chat, see the following topics in the Amazon +Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in Amazon +Connect Amazon Connect Chat security best practices # Arguments - `contact_flow_id`: The identifier of the flow for initiating the chat. To see the @@ -9384,7 +9610,9 @@ end Initiates real-time message streaming for a new chat contact. For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect -Administrator Guide. +Administrator Guide. For more information about chat, see the following topics in the +Amazon Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in +Amazon Connect Amazon Connect Chat security best practices # Arguments - `chat_streaming_configuration`: The streaming configuration, such as the Amazon SNS @@ -9674,8 +9902,8 @@ Amazon Connect instance (specified as InstanceId). # Arguments - `contact_flow_id`: The identifier of the flow for the call. To see the ContactFlowId in - the Amazon Connect admin website, on the navigation menu go to Routing, Contact Flows. - Choose the flow. 
On the flow page, under the name of the flow, choose Show additional flow
+  the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the
+  flow. On the flow page, under the name of the flow, choose Show additional flow
   information. The ContactFlowId is the last part of the ARN, shown here in bold:
   arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact
 -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx
@@ -10317,6 +10545,61 @@ function update_agent_status(
     )
 end
 
+"""
+    update_authentication_profile(authentication_profile_id, instance_id)
+    update_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any})
+
+This API is in preview release for Amazon Connect and is subject to change. To request
+access to this API, contact Amazon Web Services Support. Updates the selected
+authentication profile.
+
+# Arguments
+- `authentication_profile_id`: A unique identifier for the authentication profile.
+- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance
+  ID in the Amazon Resource Name (ARN) of the instance.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"AllowedIps"`: A list of IP address range strings that are allowed to access the
+  instance. For more information on how to configure IP addresses, see Configure session
+  timeouts in the Amazon Connect Administrator Guide.
+- `"BlockedIps"`: A list of IP address range strings that are blocked from accessing the
+  instance. For more information on how to configure IP addresses,
+  see Configure IP-based access control in the Amazon Connect
+  Administrator Guide.
+- `"Description"`: The description for the authentication profile.
+- `"Name"`: The name for the authentication profile. 
+- `"PeriodicSessionDuration"`: The short lived session duration configuration for users
+  logged in to Amazon Connect, in minutes. This value determines the maximum possible time
+  before an agent is authenticated. For more information, see
+  Configure session timeouts in the Amazon Connect Administrator
+  Guide.
+"""
+function update_authentication_profile(
+    AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connect(
+        "POST",
+        "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_authentication_profile(
+    AuthenticationProfileId,
+    InstanceId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connect(
+        "POST",
+        "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_contact(contact_id, instance_id)
     update_contact(contact_id, instance_id, params::Dict{String,<:Any})
diff --git a/src/services/connect_contact_lens.jl b/src/services/connect_contact_lens.jl
index 4c4d4639c4..99702556ce 100644
--- a/src/services/connect_contact_lens.jl
+++ b/src/services/connect_contact_lens.jl
@@ -16,7 +16,7 @@ Provides a list of analysis segments for a real-time analysis session.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"MaxResults"`: The maximimum number of results to return per page.
+- `"MaxResults"`: The maximum number of results to return per page.
 - `"NextToken"`: The token for the next set of results. Use the value returned in the
   previous response in the next request to retrieve the next set of results. 
""" diff --git a/src/services/controlcatalog.jl b/src/services/controlcatalog.jl index 78a7e27db8..304112bb91 100644 --- a/src/services/controlcatalog.jl +++ b/src/services/controlcatalog.jl @@ -4,6 +4,52 @@ using AWS.AWSServices: controlcatalog using AWS.Compat using AWS.UUIDs +""" + get_control(control_arn) + get_control(control_arn, params::Dict{String,<:Any}) + +Returns details about a specific control, most notably a list of Amazon Web Services +Regions where this control is supported. Input a value for the ControlArn parameter, in ARN +form. GetControl accepts controltower or controlcatalog control ARNs as input. Returns a +controlcatalog ARN format. In the API response, controls that have the value GLOBAL in the +Scope field do not show the DeployableRegions field, because it does not apply. Controls +that have the value REGIONAL in the Scope field return a value for the DeployableRegions +field, as shown in the example. + +# Arguments +- `control_arn`: The Amazon Resource Name (ARN) of the control. 
It has one of the following + formats: Global format + arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID} Or Regional format + arn:{PARTITION}:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID} Here is a more + general pattern that covers Amazon Web Services Control Tower and Control Catalog ARNs: + ^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_-]+ + +""" +function get_control(ControlArn; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", + "/get-control", + Dict{String,Any}("ControlArn" => ControlArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_control( + ControlArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controlcatalog( + "POST", + "/get-control", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ControlArn" => ControlArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_common_controls() list_common_controls(params::Dict{String,<:Any}) @@ -37,6 +83,37 @@ function list_common_controls( ) end +""" + list_controls() + list_controls(params::Dict{String,<:Any}) + +Returns a paginated list of all available controls in the Amazon Web Services Control +Catalog library. Allows you to discover available controls. The list of controls is given +as structures of type controlSummary. The ARN is returned in the global controlcatalog +format, as shown in the examples. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results on a page or for an API request call. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. 
+""" +function list_controls(; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", "/list-controls"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_controls( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controlcatalog( + "POST", + "/list-controls", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_domains() list_domains(params::Dict{String,<:Any}) diff --git a/src/services/controltower.jl b/src/services/controltower.jl index d5f92b0b2e..c7e452f056 100644 --- a/src/services/controltower.jl +++ b/src/services/controltower.jl @@ -144,10 +144,10 @@ end disable_control(control_identifier, target_identifier) disable_control(control_identifier, target_identifier, params::Dict{String,<:Any}) -This API call turns off a control. It starts an asynchronous operation that deletes AWS -resources on the specified organizational unit and the accounts it contains. The resources -will vary according to the control that you specify. For usage examples, see the Amazon -Web Services Control Tower User Guide . +This API call turns off a control. It starts an asynchronous operation that deletes Amazon +Web Services resources on the specified organizational unit and the accounts it contains. +The resources will vary according to the control that you specify. For usage examples, see +the Controls Reference Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -265,7 +265,7 @@ end This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . 
# Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -406,7 +406,7 @@ end Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `operation_identifier`: The ID of the asynchronous operation, which is used to track @@ -490,8 +490,8 @@ end get_enabled_control(enabled_control_identifier) get_enabled_control(enabled_control_identifier, params::Dict{String,<:Any}) -Retrieves details about an enabled control. For usage examples, see the Amazon Web -Services Control Tower User Guide . +Retrieves details about an enabled control. For usage examples, see the Controls Reference +Guide . # Arguments - `enabled_control_identifier`: The controlIdentifier of the enabled control. @@ -644,7 +644,8 @@ end list_control_operations() list_control_operations(params::Dict{String,<:Any}) -Provides a list of operations in progress or queued. +Provides a list of operations in progress or queued. For usage examples, see +ListControlOperation examples. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -713,12 +714,12 @@ end list_enabled_controls(params::Dict{String,<:Any}) Lists the controls enabled by Amazon Web Services Control Tower on the specified -organizational unit and the accounts it contains. For usage examples, see the Amazon Web -Services Control Tower User Guide . +organizational unit and the accounts it contains. For usage examples, see the Controls +Reference Guide . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"filter"`: An input filter for the ListCEnabledControls API that lets you select the +- `"filter"`: An input filter for the ListEnabledControls API that lets you select the types of control operations to view. - `"maxResults"`: How many results to return per API call. - `"nextToken"`: The token to continue the list from a previous API call with the same @@ -746,6 +747,41 @@ function list_enabled_controls( ) end +""" + list_landing_zone_operations() + list_landing_zone_operations(params::Dict{String,<:Any}) + +Lists all landing zone operations from the past 90 days. Results are sorted by time, with +the most recent operation first. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An input filter for the ListLandingZoneOperations API that lets you select + the types of landing zone operations to view. +- `"maxResults"`: How many results to return per API call. +- `"nextToken"`: The token to continue the list from a previous API call with the same + parameters. +""" +function list_landing_zone_operations(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-landingzone-operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_landing_zone_operations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-landingzone-operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_landing_zones() list_landing_zones(params::Dict{String,<:Any}) @@ -781,8 +817,8 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Returns a list of tags associated with the resource. For usage examples, see the Amazon -Web Services Control Tower User Guide . +Returns a list of tags associated with the resource. 
For usage examples, see the Controls +Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -861,7 +897,10 @@ end reset_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) This API call resets a landing zone. It starts an asynchronous operation that resets the -landing zone to the parameters specified in its original configuration. +landing zone to the parameters specified in the original configuration, which you specified +in the manifest file. Nothing in the manifest file's original landing zone configuration is +changed during the reset process, by default. This API is not the same as a rollback of a +landing zone version, which is not a supported operation. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. @@ -902,8 +941,7 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower -User Guide . +Applies tags to a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource to be tagged. @@ -938,8 +976,7 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes tags from a resource. For usage examples, see the Amazon Web Services Control -Tower User Guide . +Removes tags from a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -1036,11 +1073,11 @@ end EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services -Control Tower will update the control to match any valid parameters that you supply. 
If the +Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or -you can run an extending governance operation. For usage examples, see the Amazon Web -Services Control Tower User Guide +you can run an extending governance operation. For usage examples, see the Controls +Reference Guide . # Arguments - `enabled_control_identifier`: The ARN of the enabled control that will be updated. @@ -1095,8 +1132,10 @@ specified in the updated manifest file. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. -- `manifest`: The manifest JSON file is a text file that describes your Amazon Web Services - resources. For examples, review Launch your landing zone. +- `manifest`: The manifest file (JSON) is a text file that describes your Amazon Web + Services resources. For an example, review Launch your landing zone. The example manifest + file contains each of the available parameters. The schema for the landing zone's JSON + manifest file is not published, by design. - `version`: The landing zone version, for example, 3.2. """ diff --git a/src/services/datazone.jl b/src/services/datazone.jl index 522ae7d8ab..9603eecde6 100644 --- a/src/services/datazone.jl +++ b/src/services/datazone.jl @@ -281,6 +281,70 @@ function create_asset( ) end +""" + create_asset_filter(asset_identifier, configuration, domain_identifier, name) + create_asset_filter(asset_identifier, configuration, domain_identifier, name, params::Dict{String,<:Any}) + +Creates a data asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `configuration`: The configuration of the asset filter. +- `domain_identifier`: The ID of the domain in which you want to create an asset filter. +- `name`: The name of the asset filter. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the asset filter. +""" +function create_asset_filter( + assetIdentifier, + configuration, + domainIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_asset_filter( + assetIdentifier, + configuration, + domainIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_asset_revision(domain_identifier, identifier, name) create_asset_revision(domain_identifier, identifier, name, params::Dict{String,<:Any}) @@ -1466,6 +1530,47 @@ function delete_asset( ) end +""" + delete_asset_filter(asset_identifier, domain_identifier, identifier) + delete_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to delete an asset filter. +- `identifier`: The ID of the asset filter that you want to delete. 
+ +""" +function delete_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_asset_type(domain_identifier, identifier) delete_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2233,6 +2338,47 @@ function get_asset( ) end +""" + get_asset_filter(asset_identifier, domain_identifier, identifier) + get_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to get an asset filter. +- `identifier`: The ID of the asset filter. 
+ +""" +function get_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_asset_type(domain_identifier, identifier) get_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2528,6 +2674,45 @@ function get_environment_blueprint_configuration( ) end +""" + get_environment_credentials(domain_identifier, environment_identifier) + get_environment_credentials(domain_identifier, environment_identifier, params::Dict{String,<:Any}) + +Gets the credentials of an environment in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this environment and + its credentials exist. +- `environment_identifier`: The ID of the environment whose credentials this operation gets. 
+ +""" +function get_environment_credentials( + domainIdentifier, + environmentIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/credentials"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_environment_credentials( + domainIdentifier, + environmentIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/credentials", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_environment_profile(domain_identifier, identifier) get_environment_profile(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2751,6 +2936,48 @@ function get_iam_portal_login_url( ) end +""" + get_lineage_node(domain_identifier, identifier) + get_lineage_node(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain in which you want to get the data lineage node. +- `identifier`: The ID of the data lineage node that you want to get. Both, a lineage node + identifier generated by Amazon DataZone and a sourceIdentifier of the lineage node are + supported. If sourceIdentifier is greater than 1800 characters, you can use lineage node + identifier generated by Amazon DataZone to get the node details. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"timestamp"`: The event time stamp for which you want to get the data lineage node. 
+""" +function get_lineage_node( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_lineage_node( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_listing(domain_identifier, identifier) get_listing(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3109,6 +3336,54 @@ function get_user_profile( ) end +""" + list_asset_filters(asset_identifier, domain_identifier) + list_asset_filters(asset_identifier, domain_identifier, params::Dict{String,<:Any}) + +Lists asset filters. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to list asset filters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of asset filters to return in a single call to + ListAssetFilters. When the number of asset filters to be listed is greater than the value + of MaxResults, the response contains a NextToken value that you can use in a subsequent + call to ListAssetFilters to list the next set of asset filters. +- `"nextToken"`: When the number of asset filters is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of asset filters, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListAssetFilters to list the next + set of asset filters. +- `"status"`: The status of the asset filter. 
+""" +function list_asset_filters( + assetIdentifier, domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_asset_filters( + assetIdentifier, + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_asset_revisions(domain_identifier, identifier) list_asset_revisions(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3602,6 +3877,62 @@ function list_environments( ) end +""" + list_lineage_node_history(domain_identifier, identifier) + list_lineage_node_history(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists the history of the specified data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to list the history of the + specified data lineage node. +- `identifier`: The ID of the data lineage node whose history you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"direction"`: The direction of the data lineage node refers to the lineage node having + neighbors in that direction. For example, if direction is UPSTREAM, the + ListLineageNodeHistory API responds with historical versions with upstream neighbors only. +- `"maxResults"`: The maximum number of history items to return in a single call to + ListLineageNodeHistory. When the number of memberships to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListLineageNodeHistory to list the next set of items. 
+- `"nextToken"`: When the number of history items is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of items, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListLineageNodeHistory to list the + next set of items. +- `"sortOrder"`: The order by which you want data lineage node history to be sorted. +- `"timestampGTE"`: Specifies whether the action is to return data lineage node history + from the time after the event timestamp. +- `"timestampLTE"`: Specifies whether the action is to return data lineage node history + from the time prior of the event timestamp. +""" +function list_lineage_node_history( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lineage_node_history( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_metadata_generation_runs(domain_identifier) list_metadata_generation_runs(domain_identifier, params::Dict{String,<:Any}) @@ -4102,6 +4433,54 @@ function list_time_series_data_points( ) end +""" + post_lineage_event(domain_identifier, event) + post_lineage_event(domain_identifier, event, params::Dict{String,<:Any}) + +Posts a data lineage event. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to post a data lineage event. +- `event`: The data lineage event that you want to post. Only open-lineage run event are + supported as events. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function post_lineage_event( + domainIdentifier, event; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function post_lineage_event( + domainIdentifier, + event, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms) post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms, params::Dict{String,<:Any}) @@ -4173,6 +4552,7 @@ Writes the configuration for the specified environment blueprint in Amazon DataZ # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"manageAccessRoleArn"`: The ARN of the manage access role. +- `"provisioningConfigurations"`: The provisioning configuration of a blueprint. - `"provisioningRoleArn"`: The ARN of the provisioning role. - `"regionalParameters"`: The regional parameters in the environment blueprint. """ @@ -4796,6 +5176,52 @@ function untag_resource( ) end +""" + update_asset_filter(asset_identifier, domain_identifier, identifier) + update_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates an asset filter. 
+ +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to update an asset filter. +- `identifier`: The ID of the asset filter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configuration"`: The configuration of the asset filter. +- `"description"`: The description of the asset filter. +- `"name"`: The name of the asset filter. +""" +function update_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_data_source(domain_identifier, identifier) update_data_source(domain_identifier, identifier, params::Dict{String,<:Any}) diff --git a/src/services/direct_connect.jl b/src/services/direct_connect.jl index 2fe23f9070..1fb6be1354 100644 --- a/src/services/direct_connect.jl +++ b/src/services/direct_connect.jl @@ -69,7 +69,7 @@ end allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan) allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan, params::Dict{String,<:Any}) -Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an + Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an interconnect. 
Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect. Intended for use by Direct Connect Partners only. @@ -149,9 +149,9 @@ Intended for use by Direct Connect Partners only. # Arguments - `bandwidth`: The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, - 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those - Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, - 2Gbps, 5Gbps or 10Gbps hosted connection. + 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Note that only + those Direct Connect Partners who have met specific requirements are allowed to create a + 1Gbps, 2Gbps, 5Gbps, 10Gbps, or 25Gbps hosted connection. - `connection_id`: The ID of the interconnect or LAG. - `connection_name`: The name of the hosted connection. - `owner_account`: The ID of the Amazon Web Services account ID of the customer for the @@ -1098,7 +1098,7 @@ the VLAN assigned to them by the Direct Connect Partner. Intended for use by Di Connect Partners only. # Arguments -- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1 and 10. +- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1, 10, and 100. - `interconnect_name`: The name of the interconnect. - `location`: The location of the interconnect. @@ -1155,28 +1155,28 @@ Creates a link aggregation group (LAG) with the specified number of bundled phys dedicated connections between the customer network and a specific Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface. All -connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must -terminate at the same Direct Connect endpoint. You can have up to 10 dedicated connections -per LAG. 
Regardless of this limit, if you request more connections for the LAG than Direct -Connect can allocate on a single endpoint, no LAG is created. You can specify an existing -physical dedicated connection or interconnect to include in the LAG (which counts towards -the total number of connections). Doing so interrupts the current physical dedicated -connection, and re-establishes them as a member of the LAG. The LAG will be created on the -same Direct Connect endpoint to which the dedicated connection terminates. Any virtual -interfaces associated with the dedicated connection are automatically disassociated and -re-associated with the LAG. The connection ID does not change. If the Amazon Web Services -account used to create a LAG is a registered Direct Connect Partner, the LAG is -automatically enabled to host sub-connections. For a LAG owned by a partner, any associated -virtual interfaces cannot be directly configured. +connections in a LAG must use the same bandwidth (either 1Gbps, 10Gbps, 100Gbps, or +400Gbps) and must terminate at the same Direct Connect endpoint. You can have up to 10 +dedicated connections per location. Regardless of this limit, if you request more +connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is +created. You can specify an existing physical dedicated connection or interconnect to +include in the LAG (which counts towards the total number of connections). Doing so +interrupts the current physical dedicated connection, and re-establishes them as a member +of the LAG. The LAG will be created on the same Direct Connect endpoint to which the +dedicated connection terminates. Any virtual interfaces associated with the dedicated +connection are automatically disassociated and re-associated with the LAG. The connection +ID does not change. If the Amazon Web Services account used to create a LAG is a registered +Direct Connect Partner, the LAG is automatically enabled to host sub-connections. 
For a LAG +owned by a partner, any associated virtual interfaces cannot be directly configured. # Arguments - `connections_bandwidth`: The bandwidth of the individual physical dedicated connections - bundled by the LAG. The possible values are 1Gbps and 10Gbps. + bundled by the LAG. The possible values are 1Gbps, 10Gbps, 100Gbps, and 400Gbps. - `lag_name`: The name of the LAG. - `location`: The location for the LAG. - `number_of_connections`: The number of physical dedicated connections initially provisioned and bundled by the LAG. You can have a maximum of four connections when the - port speed is 1G or 10G, or two when the port speed is 100G. + port speed is 1Gbps or 10Gbps, or two when the port speed is 100Gbps or 400Gbps. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1245,7 +1245,7 @@ gateway or a Virtual Private Gateway (VGW). Connecting the private virtual inter Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different Amazon Web Services Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region. Setting the MTU of a -virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical +virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. @@ -1691,7 +1691,7 @@ end describe_connection_loa(connection_id) describe_connection_loa(connection_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. The Letter of + Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. 
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at @@ -1763,7 +1763,7 @@ end describe_connections_on_interconnect(interconnect_id) describe_connections_on_interconnect(interconnect_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been + Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been provisioned on the specified interconnect. Intended for use by Direct Connect Partners only. @@ -2017,7 +2017,7 @@ end describe_interconnect_loa(interconnect_id) describe_interconnect_loa(interconnect_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The + Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations @@ -2252,8 +2252,10 @@ end describe_virtual_gateways() describe_virtual_gateways(params::Dict{String,<:Any}) -Lists the virtual private gateways owned by the Amazon Web Services account. You can create -one or more Direct Connect private virtual interfaces linked to a virtual private gateway. + Deprecated. Use DescribeVpnGateways instead. See DescribeVPNGateways in the Amazon Elastic +Compute Cloud API Reference. Lists the virtual private gateways owned by the Amazon Web +Services account. You can create one or more Direct Connect private virtual interfaces +linked to a virtual private gateway. 
""" function describe_virtual_gateways(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2773,7 +2775,7 @@ end update_virtual_interface_attributes(virtual_interface_id, params::Dict{String,<:Any}) Updates the specified attributes of the specified virtual private interface. Setting the -MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying +MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call @@ -2787,7 +2789,7 @@ DescribeVirtualInterfaces. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"enableSiteLink"`: Indicates whether to enable or disable SiteLink. - `"mtu"`: The maximum transmission unit (MTU), in bytes. The supported values are 1500 and - 9001. The default value is 1500. + 8500. The default value is 1500. - `"virtualInterfaceName"`: The name of the virtual private interface. """ function update_virtual_interface_attributes( diff --git a/src/services/dynamodb.jl b/src/services/dynamodb.jl index c7c65bfb39..9fc08a5e46 100644 --- a/src/services/dynamodb.jl +++ b/src/services/dynamodb.jl @@ -11,11 +11,11 @@ using AWS.UUIDs This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch -returns at most a single item. The entire batch must consist of either read statements or -write statements, you cannot mix both in one batch. A HTTP 200 response does not mean -that all statements in the BatchExecuteStatement succeeded. 
Error details for individual -statements can be found under the Error field of the BatchStatementResponse for each -statement. +returns at most a single item. For more information, see Running batch operations with +PartiQL for DynamoDB . The entire batch must consist of either read statements or write +statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all +statements in the BatchExecuteStatement succeeded. Error details for individual statements +can be found under the Error field of the BatchStatementResponse for each statement. # Arguments - `statements`: The list of PartiQL statements representing the batch to run. @@ -165,9 +165,12 @@ internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items -until all items have been processed. If none of the items can be processed due to -insufficient provisioned throughput on all of the tables in the request, then -BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any +until all items have been processed. For tables and indexes with provisioned capacity, if +none of the items can be processed due to insufficient provisioned throughput on all of the +tables in the request, then BatchWriteItem returns a +ProvisionedThroughputExceededException. For all tables and indexes, if none of the items +can be processed due to other throttling scenarios (such as exceeding partition level +limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. 
If you retry the batch operation immediately, the underlying read or write requests can still fail due to @@ -733,11 +736,11 @@ does not exist, DynamoDB returns a ResourceNotFoundException. If table is alread DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the -DELETING state until the table deletion is complete. When you delete a table, any indexes -on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the -corresponding stream on that table goes into the DISABLED state, and the stream is -automatically deleted after 24 hours. Use the DescribeTable action to check the status of -the table. +DELETING state until the table deletion is complete. For the full list of table states, see +TableStatus. When you delete a table, any indexes on that table are also deleted. If you +have DynamoDB Streams enabled on the table, then the corresponding stream on that table +goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use +the DescribeTable action to check the status of the table. # Arguments - `table_name`: The name of the table to delete. You can also provide the Amazon Resource diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 505cf5fd84..f46c081d6d 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -4108,10 +4108,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"PreserveClientIp"`: Indicates whether your client's IP address is preserved as the - source. 
The value is true or false. If true, your client's IP address is used when you - connect to a resource. If false, the elastic network interface IP address is used when - you connect to a resource. Default: true +- `"PreserveClientIp"`: Indicates whether the client IP address is preserved as the source. + The following are the possible values. true - Use the client IP address as the source. + false - Use the network interface IP address as the source. Default: false - `"SecurityGroupId"`: One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint. @@ -4343,6 +4342,54 @@ function create_ipam( ) end +""" + create_ipam_external_resource_verification_token(ipam_id) + create_ipam_external_resource_verification_token(ipam_id, params::Dict{String,<:Any}) + +Create a verification token. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Arguments +- `ipam_id`: The ID of the IPAM that will create the token. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensuring idempotency. +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"TagSpecification"`: Token tags. 
+""" +function create_ipam_external_resource_verification_token( + IpamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "CreateIpamExternalResourceVerificationToken", + Dict{String,Any}("IpamId" => IpamId, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ipam_external_resource_verification_token( + IpamId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "CreateIpamExternalResourceVerificationToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IpamId" => IpamId, "ClientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_ipam_pool(address_family, ipam_scope_id) create_ipam_pool(address_family, ipam_scope_id, params::Dict{String,<:Any}) @@ -4392,13 +4439,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Locale"`: In IPAM, the locale is the Amazon Web Services Region where you want to make - an IPAM pool available for allocations. Only resources in the same Region as the locale of - the pool can get IP address allocations from the pool. You can only allocate a CIDR for a - VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that - once you choose a Locale for a pool, you cannot modify it. If you do not choose a locale, - resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. - Possible values: Any Amazon Web Services Region, such as us-east-1. 
+- `"Locale"`: The locale for the pool should be one of the following: An Amazon Web + Services Region where you want this IPAM pool to be available for allocations. The + network border group for an Amazon Web Services Local Zone where you want this IPAM pool to + be available for allocations (supported Local Zones). This option is only available for + IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions + others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any + Amazon Web Services Region or supported Amazon Web Services Local Zone. - `"PublicIpSource"`: The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is byoip. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can @@ -5546,6 +5593,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. - `"TagSpecification"`: The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for @@ -7549,7 +7600,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. -- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost on which to create the + volume. If you intend to use a volume with an instance running on an outpost, then you must + create the volume on the same outpost as the instance. You can't use a volume created in an + Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other + way around. - `"Size"`: The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size. The following are the @@ -8748,6 +8803,60 @@ function delete_ipam( ) end +""" + delete_ipam_external_resource_verification_token(ipam_external_resource_verification_token_id) + delete_ipam_external_resource_verification_token(ipam_external_resource_verification_token_id, params::Dict{String,<:Any}) + +Delete a verification token. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Arguments +- `ipam_external_resource_verification_token_id`: The token ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. 
If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function delete_ipam_external_resource_verification_token( + IpamExternalResourceVerificationTokenId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeleteIpamExternalResourceVerificationToken", + Dict{String,Any}( + "IpamExternalResourceVerificationTokenId" => + IpamExternalResourceVerificationTokenId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ipam_external_resource_verification_token( + IpamExternalResourceVerificationTokenId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeleteIpamExternalResourceVerificationToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IpamExternalResourceVerificationTokenId" => + IpamExternalResourceVerificationTokenId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_ipam_pool(ipam_pool_id) delete_ipam_pool(ipam_pool_id, params::Dict{String,<:Any}) @@ -9693,6 +9802,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. 
""" function delete_public_ipv4_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -14555,6 +14668,49 @@ function describe_ipam_byoasn( ) end +""" + describe_ipam_external_resource_verification_tokens() + describe_ipam_external_resource_verification_tokens(params::Dict{String,<:Any}) + +Describe verification tokens. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: One or more filters for the request. For more information about filtering, + see Filtering CLI output. Available filters: ipam-arn + ipam-external-resource-verification-token-arn + ipam-external-resource-verification-token-id ipam-id ipam-region state + status token-name token-value +- `"IpamExternalResourceVerificationTokenId"`: Verification token IDs. +- `"MaxResults"`: The maximum number of tokens to return in one page of results. +- `"NextToken"`: The token for the next page of results. 
+""" +function describe_ipam_external_resource_verification_tokens(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamExternalResourceVerificationTokens"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_ipam_external_resource_verification_tokens( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamExternalResourceVerificationTokens", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_ipam_pools() describe_ipam_pools(params::Dict{String,<:Any}) @@ -15743,8 +15899,11 @@ end describe_placement_groups() describe_placement_groups(params::Dict{String,<:Any}) -Describes the specified placement groups or all of your placement groups. For more -information, see Placement groups in the Amazon EC2 User Guide. +Describes the specified placement groups or all of your placement groups. To describe a +specific placement group that is shared with your account, you must specify the ID of the +placement group using the GroupId parameter. Specifying the name of a shared placement +group using the GroupNames parameter will result in an error. For more information, see +Placement groups in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15762,8 +15921,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"groupName"`: The names of the placement groups. Default: Describes all your placement - groups, or only those otherwise specified. +- `"groupName"`: The names of the placement groups. 
Constraints: You can specify a name + only if the placement group is owned by your account. If a placement group is shared with + your account, specifying the name results in an error. You must use the GroupId parameter + instead. """ function describe_placement_groups(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -18215,7 +18376,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys assigned a tag with a specific key, regardless of the tag value. volume-id - The volume ID. volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1| standard) -- `"VolumeId"`: The volume IDs. +- `"VolumeId"`: The volume IDs. If not specified, then all volumes are included in the + response. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -18240,11 +18402,9 @@ end describe_volumes_modifications() describe_volumes_modifications(params::Dict{String,<:Any}) -Describes the most recent volume modification request for the specified EBS volumes. If a -volume has never been modified, some information in the output will be null. If a volume -has been modified more than once, the output includes only the most recent modification -request. For more information, see Monitor the progress of volume modifications in the -Amazon EBS User Guide. +Describes the most recent volume modification request for the specified EBS volumes. For +more information, see Monitor the progress of volume modifications in the Amazon EBS User +Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -21818,13 +21978,8 @@ end Gets the console output for the specified instance. 
For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output -includes the last three system event log errors. By default, the console output returns -buffered information that was posted shortly after an instance transition state (start, -stop, reboot, or terminate). This information is available for at least one hour after the -most recent post. Only the most recent 64 KB of console output is available. You can -optionally retrieve the latest serial console output at any time during the instance -lifecycle. This option is supported on instance types that use the Nitro hypervisor. For -more information, see Instance console output in the Amazon EC2 User Guide. +includes the last three system event log errors. For more information, see Instance console +output in the Amazon EC2 User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -25306,9 +25461,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Virtual Function interface for the instance. There is no way to disable enhanced networking with the Intel 82599 Virtual Function interface at this time. This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable. -- `"userData"`: Changes the instance's user data to the specified value. If you are using - an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and - you can load the text from a file. Otherwise, you must provide base64-encoded text. +- `"userData"`: Changes the instance's user data to the specified value. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"value"`: A new value for the attribute. 
Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute. """ @@ -28455,17 +28610,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"Cidr\" is required. This value will be null if you specify \"NetmaskLength\" and will be filled in during the provisioning process. - `"CidrAuthorizationContext"`: A signed document that proves that you are authorized to - bring a specified IP address range to Amazon using BYOIP. This option applies to public - pools only. + bring a specified IP address range to Amazon using BYOIP. This option only applies to IPv4 + and IPv6 pools in the public scope. - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"IpamExternalResourceVerificationTokenId"`: Verification token ID. This option only + applies to IPv4 and IPv6 pools in the public scope. - `"NetmaskLength"`: The netmask length of the CIDR you'd like to provision to a pool. Can be used for provisioning Amazon-provided IPv6 CIDRs to top-level pools and for provisioning CIDRs to pools with source pools. Cannot be used to provision BYOIP CIDRs to top-level pools. Either \"NetmaskLength\" or \"Cidr\" is required. +- `"VerificationMethod"`: The method for verifying control of a public IP address range. + Defaults to remarks-x509 if not specified. This option only applies to IPv4 and IPv6 pools + in the public scope. """ function provision_ipam_pool_cidr( IpamPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -28516,6 +28676,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. """ function provision_public_ipv4_pool_cidr( IpamPoolId, NetmaskLength, PoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -30951,11 +31115,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launch. You can specify tags for the following resources only: Instances Volumes Spot Instance requests Network interfaces To tag a resource after it has been created, see CreateTags. -- `"UserData"`: The user data script to make available to the instance. For more - information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User - Guide. If you are using a command line tool, base64-encoding is performed for you, and you - can load the text from a file. Otherwise, you must provide base64-encoded text. User data - is limited to 16 KB. +- `"UserData"`: The user data to make available to the instance. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"additionalInfo"`: Reserved. - `"clientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. 
If you do not specify a client token, a randomly generated token is used diff --git a/src/services/ecr.jl b/src/services/ecr.jl index 1b94d9de1c..cf09025945 100644 --- a/src/services/ecr.jl +++ b/src/services/ecr.jl @@ -286,7 +286,6 @@ see Using pull through cache rules in the Amazon Elastic Container Registry User Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - <custom>.azurecr.io - GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -392,6 +391,81 @@ function create_repository( ) end +""" + create_repository_creation_template(applied_for, prefix) + create_repository_creation_template(applied_for, prefix, params::Dict{String,<:Any}) + +Creates a repository creation template. This template is used to define the settings for +repositories created by Amazon ECR on your behalf. For example, repositories created +through pull through cache actions. For more information, see Private repository creation +templates in the Amazon Elastic Container Registry User Guide. + +# Arguments +- `applied_for`: A list of enumerable strings representing the Amazon ECR repository + creation scenarios that this template will apply towards. The two supported scenarios are + PULL_THROUGH_CACHE and REPLICATION +- `prefix`: The repository namespace prefix to associate with the template. All + repositories created using this namespace prefix will have the settings defined in this + template applied. For example, a prefix of prod would apply to all repositories beginning + with prod/. Similarly, a prefix of prod/team would apply to all repositories beginning with + prod/team/. 
To apply a template to all repositories in your registry that don't have an + associated creation template, you can use ROOT as the prefix. There is always an assumed / + applied to the end of the prefix. If you specify ecr-public as the prefix, Amazon ECR + treats that as ecr-public/. When using a pull through cache rule, the repository prefix you + specify during rule creation is what you should specify as your repository creation + template prefix as well. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customRoleArn"`: The ARN of the role to be assumed by Amazon ECR. This role must be in + the same account as the registry that you are configuring. +- `"description"`: A description for the repository creation template. +- `"encryptionConfiguration"`: The encryption configuration to use for repositories created + using the template. +- `"imageTagMutability"`: The tag mutability setting for the repository. If this parameter + is omitted, the default setting of MUTABLE will be used which will allow image tags to be + overwritten. If IMMUTABLE is specified, all image tags within the repository will be + immutable which will prevent them from being overwritten. +- `"lifecyclePolicy"`: The lifecycle policy to use for repositories created using the + template. +- `"repositoryPolicy"`: The repository policy to apply to repositories created using the + template. A repository policy is a permissions policy associated with a repository to + control access permissions. +- `"resourceTags"`: The metadata to apply to the repository to help you categorize and + organize. Each tag consists of a key and an optional value, both of which you define. Tag + keys can have a maximum character length of 128 characters, and tag values can have a + maximum length of 256 characters. 
+""" +function create_repository_creation_template( + appliedFor, prefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "CreateRepositoryCreationTemplate", + Dict{String,Any}("appliedFor" => appliedFor, "prefix" => prefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_repository_creation_template( + appliedFor, + prefix, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ecr( + "CreateRepositoryCreationTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("appliedFor" => appliedFor, "prefix" => prefix), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_lifecycle_policy(repository_name) delete_lifecycle_policy(repository_name, params::Dict{String,<:Any}) @@ -543,6 +617,38 @@ function delete_repository( ) end +""" + delete_repository_creation_template(prefix) + delete_repository_creation_template(prefix, params::Dict{String,<:Any}) + +Deletes a repository creation template. + +# Arguments +- `prefix`: The repository namespace prefix associated with the repository creation + template. 
+ +""" +function delete_repository_creation_template( + prefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "DeleteRepositoryCreationTemplate", + Dict{String,Any}("prefix" => prefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_repository_creation_template( + prefix, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "DeleteRepositoryCreationTemplate", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("prefix" => prefix), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_repository_policy(repository_name) delete_repository_policy(repository_name, params::Dict{String,<:Any}) @@ -852,6 +958,53 @@ function describe_repositories( ) end +""" + describe_repository_creation_templates() + describe_repository_creation_templates(params::Dict{String,<:Any}) + +Returns details about the repository creation templates in a registry. The prefixes request +parameter can be used to return the details for a specific repository creation template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of repository results returned by + DescribeRepositoryCreationTemplatesRequest in paginated output. When this parameter is + used, DescribeRepositoryCreationTemplatesRequest only returns maxResults results in a + single page along with a nextToken response element. The remaining results of the initial + request can be seen by sending another DescribeRepositoryCreationTemplatesRequest request + with the returned nextToken value. This value can be between 1 and 1000. If this parameter + is not used, then DescribeRepositoryCreationTemplatesRequest returns up to 100 results and + a nextToken value, if applicable. 
+- `"nextToken"`: The nextToken value returned from a previous paginated + DescribeRepositoryCreationTemplates request where maxResults was used and the results + exceeded the value of that parameter. Pagination continues from the end of the previous + results that returned the nextToken value. This value is null when there are no more + results to return. This token should be treated as an opaque identifier that is only used + to retrieve the next items in a list and not for other programmatic purposes. +- `"prefixes"`: The repository namespace prefixes associated with the repository creation + templates to describe. If this value is not specified, all repository creation templates + are returned. +""" +function describe_repository_creation_templates(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "DescribeRepositoryCreationTemplates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_repository_creation_templates( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "DescribeRepositoryCreationTemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_authorization_token() get_authorization_token(params::Dict{String,<:Any}) @@ -1569,9 +1722,10 @@ configuration for a repository can be retrieved with the DescribeRegistry API ac first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. -When configuring cross-account replication, the destination account must grant the source -account permission to replicate. This permission is controlled using a registry permissions -policy. For more information, see PutRegistryPolicy. +For more information on the custom role for replication, see Creating an IAM role for +replication. 
When configuring cross-account replication, the destination account must +grant the source account permission to replicate. This permission is controlled using a +registry permissions policy. For more information, see PutRegistryPolicy. # Arguments - `replication_configuration`: An object representing the replication configuration for a @@ -1889,6 +2043,64 @@ function update_pull_through_cache_rule( ) end +""" + update_repository_creation_template(prefix) + update_repository_creation_template(prefix, params::Dict{String,<:Any}) + +Updates an existing repository creation template. + +# Arguments +- `prefix`: The repository namespace prefix that matches an existing repository creation + template in the registry. All repositories created using this namespace prefix will have + the settings defined in this template applied. For example, a prefix of prod would apply to + all repositories beginning with prod/. This includes a repository named prod/team1 as well + as a repository named prod/repository1. To apply a template to all repositories in your + registry that don't have an associated creation template, you can use ROOT as the prefix. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appliedFor"`: Updates the list of enumerable strings representing the Amazon ECR + repository creation scenarios that this template will apply towards. The two supported + scenarios are PULL_THROUGH_CACHE and REPLICATION +- `"customRoleArn"`: The ARN of the role to be assumed by Amazon ECR. This role must be in + the same account as the registry that you are configuring. +- `"description"`: A description for the repository creation template. +- `"encryptionConfiguration"`: +- `"imageTagMutability"`: Updates the tag mutability setting for the repository. If this + parameter is omitted, the default setting of MUTABLE will be used which will allow image + tags to be overwritten. 
If IMMUTABLE is specified, all image tags within the repository + will be immutable which will prevent them from being overwritten. +- `"lifecyclePolicy"`: Updates the lifecycle policy associated with the specified + repository creation template. +- `"repositoryPolicy"`: Updates the repository policy created using the template. A + repository policy is a permissions policy associated with a repository to control access + permissions. +- `"resourceTags"`: The metadata to apply to the repository to help you categorize and + organize. Each tag consists of a key and an optional value, both of which you define. Tag + keys can have a maximum character length of 128 characters, and tag values can have a + maximum length of 256 characters. +""" +function update_repository_creation_template( + prefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "UpdateRepositoryCreationTemplate", + Dict{String,Any}("prefix" => prefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_repository_creation_template( + prefix, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "UpdateRepositoryCreationTemplate", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("prefix" => prefix), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ upload_layer_part(layer_part_blob, part_first_byte, part_last_byte, repository_name, upload_id) upload_layer_part(layer_part_blob, part_first_byte, part_last_byte, repository_name, upload_id, params::Dict{String,<:Any}) diff --git a/src/services/eks.jl b/src/services/eks.jl index 0c0fe28615..43f0bbd9cb 100644 --- a/src/services/eks.jl +++ b/src/services/eks.jl @@ -394,6 +394,10 @@ Launching Amazon EKS nodes in the Amazon EKS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accessConfig"`: The access configuration for the cluster. 
+- `"bootstrapSelfManagedAddons"`: If you set this value to False when creating a cluster, + the default networking add-ons will not be installed. The default networking addons include + vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party + alternative add-ons or self-manage the default networking add-ons. - `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - `"encryptionConfig"`: The encryption configuration for the cluster. @@ -411,6 +415,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tags"`: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. +- `"upgradePolicy"`: New clusters, by default, have extended support enabled. You can + disable extended support when creating a cluster by setting this value to STANDARD. - `"version"`: The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used. The default version might not be the latest version available. @@ -2591,6 +2597,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. - `"resourcesVpcConfig"`: +- `"upgradePolicy"`: You can enable or disable extended support for clusters currently on + standard support. You cannot disable extended support once it starts. You must enable + extended support before your cluster exits standard support. 
""" function update_cluster_config(name; aws_config::AbstractAWSConfig=global_aws_config()) return eks( diff --git a/src/services/elastic_load_balancing_v2.jl b/src/services/elastic_load_balancing_v2.jl index 32780708d9..3af8d6795a 100644 --- a/src/services/elastic_load_balancing_v2.jl +++ b/src/services/elastic_load_balancing_v2.jl @@ -616,6 +616,49 @@ function delete_rule( ) end +""" + delete_shared_trust_store_association(resource_arn, trust_store_arn) + delete_shared_trust_store_association(resource_arn, trust_store_arn, params::Dict{String,<:Any}) + +Deletes a shared trust store association. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +""" +function delete_shared_trust_store_association( + ResourceArn, TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DeleteSharedTrustStoreAssociation", + Dict{String,Any}("ResourceArn" => ResourceArn, "TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_shared_trust_store_association( + ResourceArn, + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "DeleteSharedTrustStoreAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceArn" => ResourceArn, "TrustStoreArn" => TrustStoreArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_target_group(target_group_arn) delete_target_group(target_group_arn, params::Dict{String,<:Any}) @@ -1095,7 +1138,7 @@ Describes the health of the specified targets or all of your targets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Include"`: Used to inclue anomaly detection information. 
+- `"Include"`: Used to include anomaly detection information. - `"Targets"`: The targets. """ function describe_target_health( @@ -1167,7 +1210,7 @@ end describe_trust_store_revocations(trust_store_arn) describe_trust_store_revocations(trust_store_arn, params::Dict{String,<:Any}) -Describes the revocation files in use by the specified trust store arn, or revocation ID. +Describes the revocation files in use by the specified trust store or revocation files. # Arguments - `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. @@ -1208,7 +1251,7 @@ end describe_trust_stores() describe_trust_stores(params::Dict{String,<:Any}) -Describes all trust stores for a given account by trust store arn’s or name. +Describes all trust stores for the specified account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1234,6 +1277,39 @@ function describe_trust_stores( ) end +""" + get_resource_policy(resource_arn) + get_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Retrieves the resource policy for a specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. 
+ +""" +function get_resource_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return elastic_load_balancing_v2( + "GetResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "GetResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_trust_store_ca_certificates_bundle(trust_store_arn) get_trust_store_ca_certificates_bundle(trust_store_arn, params::Dict{String,<:Any}) @@ -1565,7 +1641,7 @@ end modify_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, trust_store_arn) modify_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, trust_store_arn, params::Dict{String,<:Any}) -Update the ca certificate bundle for a given trust store. +Update the ca certificate bundle for the specified trust store. # Arguments - `ca_certificates_bundle_s3_bucket`: The Amazon S3 bucket for the ca certificates bundle. @@ -1810,7 +1886,10 @@ Sets the type of IP addresses used by the subnets of the specified load balancer - `ip_address_type`: Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 - (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load + (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). Note: Application + Load Balancer authentication only supports IPv4 addresses when connecting to an Identity + Provider (IdP) or Amazon Cognito endpoint. 
Without a public IPv4 address the load balancer + cannot complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible diff --git a/src/services/elasticache.jl b/src/services/elasticache.jl index f1917987c5..3fec282e64 100644 --- a/src/services/elasticache.jl +++ b/src/services/elasticache.jl @@ -253,20 +253,21 @@ end copy_serverless_cache_snapshot(source_serverless_cache_snapshot_name, target_serverless_cache_snapshot_name) copy_serverless_cache_snapshot(source_serverless_cache_snapshot_name, target_serverless_cache_snapshot_name, params::Dict{String,<:Any}) -Creates a copy of an existing serverless cache’s snapshot. Available for Redis only. +Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and +Serverless Memcached only. # Arguments - `source_serverless_cache_snapshot_name`: The identifier of the existing serverless - cache’s snapshot to be copied. Available for Redis only. + cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only. - `target_serverless_cache_snapshot_name`: The identifier for the snapshot to be created. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"KmsKeyId"`: The identifier of the KMS key used to encrypt the target snapshot. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. - `"Tags"`: A list of tags to be added to the target snapshot resource. A tag is a - key-value pair. Available for Redis only. Default: NULL + key-value pair. Available for Redis OSS and Serverless Memcached only. 
Default: NULL """ function copy_serverless_cache_snapshot( SourceServerlessCacheSnapshotName, @@ -312,34 +313,35 @@ end copy_snapshot(source_snapshot_name, target_snapshot_name) copy_snapshot(source_snapshot_name, target_snapshot_name, params::Dict{String,<:Any}) -Makes a copy of an existing snapshot. This operation is valid for Redis only. Users or -groups that have permissions to use the CopySnapshot operation can create their own Amazon -S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy -to control who has the ability to use the CopySnapshot operation. For more information -about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and -Authentication & Access Control. You could receive the following error messages. -Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: -Create an Amazon S3 bucket in the same region as your snapshot. For more information, see -Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 -bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your -snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache -User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. +Makes a copy of an existing snapshot. This operation is valid for Redis OSS only. Users +or groups that have permissions to use the CopySnapshot operation can create their own +Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM +policy to control who has the ability to use the CopySnapshot operation. For more +information about using IAM to control the use of ElastiCache operations, see Exporting +Snapshots and Authentication & Access Control. You could receive the following error +messages. Error Messages Error Message: The S3 bucket %s is outside of the region. 
Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error -Message: The authenticated user does not have sufficient permissions to perform the desired -activity. Solution: Contact your system administrator to get the needed permissions. -Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the -TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively -create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error -Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: -Add List and Read permissions on the bucket. For more information, see Step 2: Grant -ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error -Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. -Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: -Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error -Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. -Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant -ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. +Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same +region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in +the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the +authenticated user. Solution: Create an Amazon S3 bucket in the same region as your +snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache +User Guide. Error Message: The authenticated user does not have sufficient permissions +to perform the desired activity. 
Solution: Contact your system administrator to get the +needed permissions. Error Message: The S3 bucket %s already contains an object with key +%s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, +you could alternatively create a new Amazon S3 bucket and use this same value for +TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s +on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more +information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the +ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE +permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. +For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the +ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP +permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more +information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the +ElastiCache User Guide. # Arguments - `source_snapshot_name`: The name of an existing snapshot from which to make a copy. @@ -402,8 +404,8 @@ end create_cache_cluster(cache_cluster_id, params::Dict{String,<:Any}) Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine -software, either Memcached or Redis. This operation is not supported for Redis (cluster -mode enabled) clusters. +software, either Memcached or Redis OSS. This operation is not supported for Redis OSS +(cluster mode enabled) clusters. # Arguments - `cache_cluster_id`: The node group (shard) identifier. This parameter is stored as a @@ -423,8 +425,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys special characters are !, &, #, , ^, <, >, and -. Other printable special characters cannot be used in the AUTH token. 
For more information, see AUTH password at http://redis.io/commands/AUTH. -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"CacheNodeType"`: The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the @@ -432,15 +434,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region - availability, see Supported Node Types M6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, - cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, + availability, see Supported Node Types M6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine - version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, 
cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached + engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node + types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, @@ -450,9 +452,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region - availability, see Supported Node Types R6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, - cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, @@ -461,9 +463,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. - Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ - with automatic failover is not supported on T1 instances. Redis configuration variables - appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"CacheParameterGroupName"`: The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a @@ -485,17 +488,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. - `"IpDiscovery"`: The network type you choose when modifying a cluster, either ipv4 | - ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached - engine version 1.6.6 on all instances built on the Nitro system. + ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"NetworkType"`: Must be either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads - using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances - built on the Nitro system. + using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all + instances built on the Nitro system. - `"NotificationTopicArn"`: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. - `"NumCacheNodes"`: The initial number of cache nodes that the cluster has. For clusters - running Redis, this value must be 1. For clusters running Memcached, this value must be + running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. @@ -531,12 +534,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). - `"SnapshotArns"`: A single-element string list containing an Amazon Resource Name (ARN) - that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file - is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot - contain any commas. This parameter is only valid if the Engine parameter is redis. + that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot + file is used to populate the node group (shard). The Amazon S3 object name in the ARN + cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb -- `"SnapshotName"`: The name of a Redis snapshot from which to restore data into the new - node group (shard). 
The snapshot status changes to restoring while the new node group +- `"SnapshotName"`: The name of a Redis OSS snapshot from which to restore data into the + new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. - `"SnapshotRetentionLimit"`: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a @@ -763,11 +766,11 @@ end create_global_replication_group(global_replication_group_id_suffix, primary_replication_group_id) create_global_replication_group(global_replication_group_id_suffix, primary_replication_group_id, params::Dict{String,<:Any}) -Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region -replication. Using Global Datastore for Redis, you can create cross-region read replica -clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across -regions. For more information, see Replication Across Regions Using Global Datastore. -The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The +Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region +replication. Using Global Datastore for Redis OSS, you can create cross-region read replica +clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery +across regions. For more information, see Replication Across Regions Using Global +Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. 
@@ -828,29 +831,29 @@ end create_replication_group(replication_group_description, replication_group_id) create_replication_group(replication_group_description, replication_group_id, params::Dict{String,<:Any}) -Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication -group. This API can be used to create a standalone regional replication group or a -secondary replication group associated with a Global datastore. A Redis (cluster mode -disabled) replication group is a collection of nodes, where one of the nodes is a +Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) +replication group. This API can be used to create a standalone regional replication group +or a secondary replication group associated with a Global datastore. A Redis OSS (cluster +mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are -asynchronously propagated to the replicas. A Redis cluster-mode enabled cluster is +asynchronously propagated to the replicas. A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. The node or -shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is -5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges -between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and -no replicas). Make sure there are enough available IP addresses to accommodate the -increase. Common pitfalls include the subnets in the subnet group have too small a CIDR +shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine +version is 5.0.6 or higher. 
For example, you can choose to configure a 500 node cluster +that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single +primary and no replicas). Make sure there are enough available IP addresses to accommodate +the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per -cluster per instance type. When a Redis (cluster mode disabled) replication group has been -successfully created, you can add one or more read replicas to it, up to a total of 5 read -replicas. If you need to increase or decrease the number of node groups (console: shards), -you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling -ElastiCache for Redis Clusters in the ElastiCache User Guide. This operation is valid for -Redis only. +cluster per instance type. When a Redis OSS (cluster mode disabled) replication group has +been successfully created, you can add one or more read replicas to it, up to a total of 5 +read replicas. If you need to increase or decrease the number of node groups (console: +shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling +ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide. This operation is valid +for Redis OSS only. # Arguments - `replication_group_description`: A user-created description for the replication group. @@ -865,7 +868,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. 
Required: Only available when creating a - replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: + false - `"AuthToken"`: Reserved parameter. The password used to access a password protected server. AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. For HIPAA compliance, you must specify @@ -875,27 +879,27 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys characters are !, &, #, , ^, <, >, and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"AutomaticFailoverEnabled"`: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled - must be enabled for Redis (cluster mode enabled) replication groups. Default: false + must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false - `"CacheNodeType"`: The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region - availability, see Supported Node Types M6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, - cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, + availability, see Supported Node Types M6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine - version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached + engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node + types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, @@ -905,9 +909,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region - availability, see Supported Node Types R6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, - cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, @@ -916,16 +920,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. - Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ - with automatic failover is not supported on T1 instances. Redis configuration variables - appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. 
- `"CacheParameterGroupName"`: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the - specified engine is used. If you are running Redis version 3.2.4 or later, only one node - group (shard), and want to use a default parameter group, we recommend that you specify the - parameter group by name. To create a Redis (cluster mode disabled) replication group, - use CacheParameterGroupName=default.redis3.2. To create a Redis (cluster mode enabled) - replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + specified engine is used. If you are running Redis OSS version 3.2.4 or later, only one + node group (shard), and want to use a default parameter group, we recommend that you + specify the parameter group by name. To create a Redis OSS (cluster mode disabled) + replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis OSS + (cluster mode enabled) replication group, use + CacheParameterGroupName=default.redis3.2.cluster.on. - `"CacheSecurityGroupNames"`: A list of cache security group names to associate with this replication group. - `"CacheSubnetGroupName"`: The name of the cache subnet group to be used for the @@ -933,10 +939,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups. - `"ClusterMode"`: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, - you must first set the cluster mode to Compatible. Compatible mode allows your Redis + you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you - migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode - configuration and set the cluster mode to Enabled. 
+ migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster + mode configuration and set the cluster mode to Enabled. - `"DataTieringEnabled"`: Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering. @@ -951,23 +957,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the earlier engine version. - `"GlobalReplicationGroupId"`: The name of the Global datastore - `"IpDiscovery"`: The network type you choose when creating a replication group, either - ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or + ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"KmsKeyId"`: The ID of the KMS key used to encrypt the disk in the cluster. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"MultiAZEnabled"`: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ. - `"NetworkType"`: Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances - built on the Nitro system. + using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all + instances built on the Nitro system. - `"NodeGroupConfiguration"`: A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, - ReplicaAvailabilityZones, ReplicaCount, and Slots. 
If you're creating a Redis (cluster mode - disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter - to individually configure each node group (shard), or you can omit this parameter. However, - it is required when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You - must configure each node group (shard) using this parameter because you must specify the - slots for each node group. + ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis OSS (cluster + mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this + parameter to individually configure each node group (shard), or you can omit this + parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster + from a S3 rdb file. You must configure each node group (shard) using this parameter because + you must specify the slots for each node group. - `"NotificationTopicArn"`: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. @@ -978,8 +984,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6. The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas). - `"NumNodeGroups"`: An optional parameter that specifies the number of node groups - (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode - disabled) either omit this parameter or set it to 1. Default: 1 + (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS + (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 - `"Port"`: The port number on which each member of the replication group accepts connections. 
- `"PreferredCacheClusterAZs"`: A list of EC2 Availability Zones in which the replication @@ -1005,9 +1011,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). - `"ServerlessCacheSnapshotName"`: The name of the snapshot used to create a replication - group. Available for Redis only. + group. Available for Redis OSS only. - `"SnapshotArns"`: A list of Amazon Resource Names (ARN) that uniquely identify the Redis - RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new + OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration @@ -1031,13 +1037,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only - available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x - or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled - as true, an AuthToken, and a CacheSubnetGroup. + available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, + 4.x or later. Default: false For HIPAA compliance, you must specify + TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. 
- `"TransitEncryptionMode"`: A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both - encrypted and unencrypted connections at the same time. Once you migrate all your Redis + encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can @@ -1100,7 +1106,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Units for the cache. - `"DailySnapshotTime"`: The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be - created on an automatic daily basis. Available for Redis only. + created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only. - `"Description"`: User-provided description for the serverless cache. The default is NULL, i.e. if no description is provided then an empty string will be returned. The maximum length is 255 characters. @@ -1113,16 +1119,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys end-point (private-link). If no other information is given this will be the VPC’s Default Security Group that is associated with the cluster VPC end-point. - `"SnapshotArnsToRestore"`: The ARN(s) of the snapshot that the new serverless cache will - be created from. Available for Redis only. + be created from. Available for Redis OSS and Serverless Memcached only. - `"SnapshotRetentionLimit"`: The number of snapshots that will be retained for the serverless cache that is being created. 
As new snapshots beyond this limit are added, the - oldest snapshots will be deleted on a rolling basis. Available for Redis only. + oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless + Memcached only. - `"SubnetIds"`: A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. - `"Tags"`: The list of tags (key, value) pairs to be added to the serverless cache resource. Default is NULL. - `"UserGroupId"`: The identifier of the UserGroup to be associated with the serverless - cache. Available for Redis only. Default is NULL. + cache. Available for Redis OSS only. Default is NULL. """ function create_serverless_cache( Engine, ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1161,20 +1168,21 @@ end create_serverless_cache_snapshot(serverless_cache_name, serverless_cache_snapshot_name, params::Dict{String,<:Any}) This API creates a copy of an entire ServerlessCache at a specific moment in time. -Available for Redis only. +Available for Redis OSS and Serverless Memcached only. # Arguments - `serverless_cache_name`: The name of an existing serverless cache. The snapshot is - created from this cache. Available for Redis only. + created from this cache. Available for Redis OSS and Serverless Memcached only. - `serverless_cache_snapshot_name`: The name for the snapshot being created. Must be unique - for the customer account. Available for Redis only. Must be between 1 and 255 characters. + for the customer account. Available for Redis OSS and Serverless Memcached only. Must be + between 1 and 255 characters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"KmsKeyId"`: The ID of the KMS key used to encrypt the snapshot. Available for Redis - only. Default: NULL +- `"KmsKeyId"`: The ID of the KMS key used to encrypt the snapshot. 
Available for Redis OSS + and Serverless Memcached only. Default: NULL - `"Tags"`: A list of tags to be added to the snapshot resource. A tag is a key-value pair. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. """ function create_serverless_cache_snapshot( ServerlessCacheName, @@ -1219,7 +1227,7 @@ end create_snapshot(snapshot_name, params::Dict{String,<:Any}) Creates a copy of an entire cluster or replication group at a specific moment in time. -This operation is valid for Redis only. +This operation is valid for Redis OSS only. # Arguments - `snapshot_name`: A name for the snapshot being created. @@ -1261,8 +1269,8 @@ end create_user(access_string, engine, user_id, user_name) create_user(access_string, engine, user_id, user_name, params::Dict{String,<:Any}) -For Redis engine version 6.0 onwards: Creates a Redis user. For more information, see Using -Role Based Access Control (RBAC). +For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, +see Using Role Based Access Control (RBAC). # Arguments - `access_string`: Access permissions string used for this user. @@ -1329,17 +1337,18 @@ end create_user_group(engine, user_group_id) create_user_group(engine, user_group_id, params::Dict{String,<:Any}) -For Redis engine version 6.0 onwards: Creates a Redis user group. For more information, see -Using Role Based Access Control (RBAC) +For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more +information, see Using Role Based Access Control (RBAC) # Arguments -- `engine`: The current supported value is Redis. +- `engine`: The current supported value is Redis user. - `user_group_id`: The ID of the user group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Tags"`: A list of tags to be added to this resource. A tag is a key-value pair. A tag - key must be accompanied by a tag value, although null is accepted. 
Available for Redis only. + key must be accompanied by a tag value, although null is accepted. Available for Redis OSS + only. - `"UserIds"`: The list of user IDs that belong to the user group. """ function create_user_group( @@ -1390,12 +1399,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"GlobalNodeGroupsToRemove"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. - ElastiCache for Redis will attempt to remove all node groups listed by + ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. - `"GlobalNodeGroupsToRetain"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. - ElastiCache for Redis will attempt to retain all node groups listed by + ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. """ function decrease_node_groups_in_global_replication_group( @@ -1444,10 +1453,10 @@ end decrease_replica_count(apply_immediately, replication_group_id) decrease_replica_count(apply_immediately, replication_group_id, params::Dict{String,<:Any}) -Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication -group or the number of replica nodes in one or more node groups (shards) of a Redis -(cluster mode enabled) replication group. This operation is performed with no cluster down -time. +Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) +replication group or the number of replica nodes in one or more node groups (shards) of a +Redis OSS (cluster mode enabled) replication group. 
This operation is performed with no +cluster down time. # Arguments - `apply_immediately`: If True, the number of replica nodes is decreased immediately. @@ -1458,15 +1467,15 @@ time. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NewReplicaCount"`: The number of read replica nodes you want at the completion of this - operation. For Redis (cluster mode disabled) replication groups, this is the number of - replica nodes in the replication group. For Redis (cluster mode enabled) replication + operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of + replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. - The minimum number of replicas in a shard or replication group is: Redis (cluster mode - disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis (cluster - mode enabled): 0 (though you will not be able to failover to a replica if your primary node - fails) + The minimum number of replicas in a shard or replication group is: Redis OSS (cluster + mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis OSS + (cluster mode enabled): 0 (though you will not be able to failover to a replica if your + primary node fails) - `"ReplicaConfiguration"`: A list of ConfigureShard objects that can be used to configure - each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has + each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. - `"ReplicasToRemove"`: A list of the node ids to remove from the replication group or node group (shard). @@ -1514,10 +1523,10 @@ end Deletes a previously provisioned cluster. 
DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot -cancel or revert this operation. This operation is not valid for: Redis (cluster mode -enabled) clusters Redis (cluster mode disabled) clusters A cluster that is the last +cancel or revert this operation. This operation is not valid for: Redis OSS (cluster mode +enabled) clusters Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication -group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis +group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state # Arguments @@ -1748,8 +1757,9 @@ replication group, including the primary/primaries and all of the read replicas. replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the -selected resources; you cannot cancel or revert this operation. This operation is valid -for Redis only. +selected resources; you cannot cancel or revert this operation. CreateSnapshot +permission is required to create a final snapshot. Without this permission, the API call +will fail with an Access Denied exception. This operation is valid for Redis OSS only. # Arguments - `replication_group_id`: The identifier for the cluster to be deleted. This parameter is @@ -1795,7 +1805,9 @@ end delete_serverless_cache(serverless_cache_name) delete_serverless_cache(serverless_cache_name, params::Dict{String,<:Any}) -Deletes a specified existing serverless cache. 
+Deletes a specified existing serverless cache. CreateServerlessCacheSnapshot permission +is required to create a final snapshot. Without this permission, the API call will fail +with an Access Denied exception. # Arguments - `serverless_cache_name`: The identifier of the serverless cache to be deleted. @@ -1803,7 +1815,8 @@ Deletes a specified existing serverless cache. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"FinalSnapshotName"`: Name of the final snapshot to be taken before the serverless cache - is deleted. Available for Redis only. Default: NULL, i.e. a final snapshot is not taken. + is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a + final snapshot is not taken. """ function delete_serverless_cache( ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1838,11 +1851,12 @@ end delete_serverless_cache_snapshot(serverless_cache_snapshot_name) delete_serverless_cache_snapshot(serverless_cache_snapshot_name, params::Dict{String,<:Any}) -Deletes an existing serverless cache snapshot. Available for Redis only. +Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless +Memcached only. # Arguments - `serverless_cache_snapshot_name`: Idenfitier of the snapshot to be deleted. Available for - Redis only. + Redis OSS and Serverless Memcached only. """ function delete_serverless_cache_snapshot( @@ -1882,7 +1896,7 @@ end Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this -operation. This operation is valid for Redis only. +operation. This operation is valid for Redis OSS only. # Arguments - `snapshot_name`: The name of the snapshot to be deleted. @@ -1915,7 +1929,7 @@ end delete_user(user_id) delete_user(user_id, params::Dict{String,<:Any}) -For Redis engine version 6.0 onwards: Deletes a user. 
The user will be removed from all +For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC). @@ -1946,8 +1960,8 @@ end delete_user_group(user_group_id) delete_user_group(user_group_id, params::Dict{String,<:Any}) -For Redis engine version 6.0 onwards: Deletes a user group. The user group must first be -disassociated from the replication group before it can be deleted. For more information, +For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first +be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). # Arguments @@ -2007,7 +2021,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. - `"ShowCacheClustersNotInReplicationGroups"`: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of - a replication group. In practice, this mean Memcached and single node Redis clusters. + a replication group. In practice, this mean Memcached and single node Redis OSS clusters. - `"ShowCacheNodeInfo"`: An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. """ @@ -2360,7 +2374,7 @@ end Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation -is valid for Redis only. +is valid for Redis OSS only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2406,38 +2420,39 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types - M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached - engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node - types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for - Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, - cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, - cache.t2.medium Previous generation: (not recommended. Existing clusters are still - supported but creation of new clusters is not supported for these types.) T1 node types: - cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing - clusters are still supported but creation of new clusters is not supported for these - types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g - node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, - cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see - Supported Node Types R6g node types (available only for Redis engine version 5.0.6 - onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, - cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, - cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, - cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All - current generation instance types are created in Amazon VPC by default. Redis append-only - files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic - failover is not supported on T1 instances. Redis configuration variables appendonly and - appendfsync are not supported on Redis version 2.8.22 and later. 
+ M6g node types (available only for Redis OSS engine version 5.0.6 onward and for + Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, + cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types + (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version + 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: + cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for these types.) T1 + node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not + recommended. Existing clusters are still supported but creation of new clusters is not + supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: + Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, + cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, + cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) M2 node types: + cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, + cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node + type info All current generation instance types are created in Amazon VPC by default. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"Duration"`: The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000 @@ -2489,38 +2504,39 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types - M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached - engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node - types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for - Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, - cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, - cache.t2.medium Previous generation: (not recommended. Existing clusters are still - supported but creation of new clusters is not supported for these types.) T1 node types: - cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing - clusters are still supported but creation of new clusters is not supported for these - types.) 
C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g - node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, - cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see - Supported Node Types R6g node types (available only for Redis engine version 5.0.6 - onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, - cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, - cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, - cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All - current generation instance types are created in Amazon VPC by default. Redis append-only - files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic - failover is not supported on T1 instances. Redis configuration variables appendonly and - appendfsync are not supported on Redis version 2.8.22 and later. 
+ M6g node types (available only for Redis OSS engine version 5.0.6 onward and for + Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, + cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types + (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version + 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: + cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for these types.) T1 + node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not + recommended. Existing clusters are still supported but creation of new clusters is not + supported for these types.) 
C1 node types: cache.c1.xlarge Memory optimized: + Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, + cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, + cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) M2 node types: + cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, + cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node + type info All current generation instance types are created in Amazon VPC by default. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"Duration"`: Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration. Valid Values: 1 | 3 | 31536000 | 94608000 - `"Marker"`: An optional marker returned from a prior request. Use this marker for @@ -2566,24 +2582,26 @@ end Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. 
It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for -Redis only. +Redis OSS and Serverless Memcached only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxResults"`: The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that - remaining results can be retrieved. Available for Redis only.The default is 50. The - Validation Constraints are a maximum of 50. + remaining results can be retrieved. Available for Redis OSS and Serverless Memcached + only.The default is 50. The Validation Constraints are a maximum of 50. - `"NextToken"`: An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis - only. + OSS and Serverless Memcached only. - `"ServerlessCacheName"`: The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. - `"ServerlessCacheSnapshotName"`: The identifier of the serverless cache’s snapshot. If - this parameter is specified, only this snapshot is described. Available for Redis only. -- `"SnapshotType"`: The type of snapshot that is being described. Available for Redis only. + this parameter is specified, only this snapshot is described. Available for Redis OSS and + Serverless Memcached only. +- `"SnapshotType"`: The type of snapshot that is being described. Available for Redis OSS + and Serverless Memcached only. 
""" function describe_serverless_cache_snapshots(; aws_config::AbstractAWSConfig=global_aws_config() @@ -2678,7 +2696,7 @@ end Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation -is valid for Redis only. +is valid for Redis OSS only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2722,7 +2740,8 @@ Returns details of the update actions # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CacheClusterIds"`: The cache cluster IDs -- `"Engine"`: The Elasticache engine to which the update applies. Either Redis or Memcached +- `"Engine"`: The Elasticache engine to which the update applies. Either Redis OSS or + Memcached. - `"Marker"`: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -2789,7 +2808,7 @@ Returns a list of users. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Engine"`: The Redis engine. +- `"Engine"`: The Redis OSS engine. - `"Filters"`: Filter to determine the list of User IDs to return. - `"Marker"`: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response @@ -2875,13 +2894,13 @@ end export_serverless_cache_snapshot(s3_bucket_name, serverless_cache_snapshot_name, params::Dict{String,<:Any}) Provides the functionality to export the serverless cache snapshot data to Amazon S3. -Available for Redis only. +Available for Redis OSS only. 
# Arguments - `s3_bucket_name`: Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 - bucket must also be in same region as the snapshot. Available for Redis only. + bucket must also be in same region as the snapshot. Available for Redis OSS only. - `serverless_cache_snapshot_name`: The identifier of the serverless cache snapshot to be - exported to S3. Available for Redis only. + exported to S3. Available for Redis OSS only. """ function export_serverless_cache_snapshot( @@ -3040,10 +3059,10 @@ end increase_replica_count(apply_immediately, replication_group_id) increase_replica_count(apply_immediately, replication_group_id, params::Dict{String,<:Any}) -Dynamically increases the number of replicas in a Redis (cluster mode disabled) replication -group or the number of replica nodes in one or more node groups (shards) of a Redis -(cluster mode enabled) replication group. This operation is performed with no cluster down -time. +Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) +replication group or the number of replica nodes in one or more node groups (shards) of a +Redis OSS (cluster mode enabled) replication group. This operation is performed with no +cluster down time. # Arguments - `apply_immediately`: If True, the number of replica nodes is increased immediately. @@ -3054,11 +3073,11 @@ time. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NewReplicaCount"`: The number of read replica nodes you want at the completion of this - operation. For Redis (cluster mode disabled) replication groups, this is the number of - replica nodes in the replication group. For Redis (cluster mode enabled) replication + operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of + replica nodes in the replication group. 
For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. - `"ReplicaConfiguration"`: A list of ConfigureShard objects that can be used to configure - each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has + each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. """ function increase_replica_count( @@ -3101,7 +3120,7 @@ end list_allowed_node_type_modifications() list_allowed_node_type_modifications(params::Dict{String,<:Any}) -Lists all available node types that you can scale your Redis cluster's or replication +Lists all available node types that you can scale your Redis OSS cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. @@ -3216,9 +3235,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users - with Redis AUTH -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade + with Redis OSS AUTH +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"CacheNodeIdsToRemove"`: A list of cache node IDs to be removed. 
A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less than @@ -3243,8 +3262,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. - `"IpDiscovery"`: The network type you choose when modifying a cluster, either ipv4 | - ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached - engine version 1.6.6 on all instances built on the Nitro system. + ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"NewAvailabilityZones"`: This option is only supported on Memcached clusters. The list of Availability Zones where the new Memcached cache nodes are created. This parameter is @@ -3282,8 +3301,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter - to provide the IDs of the specific cache nodes to remove. For clusters running Redis, this - value must be 1. For clusters running Memcached, this value must be between 1 and 40. + to provide the IDs of the specific cache nodes to remove. For clusters running Redis OSS, + this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). 
A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in @@ -3507,10 +3526,10 @@ end modify_replication_group(replication_group_id) modify_replication_group(replication_group_id, params::Dict{String,<:Any}) -Modifies the settings for a replication group. This is limited to Redis 7 and newer. -Scaling for Amazon ElastiCache for Redis (cluster mode enabled) in the ElastiCache User +Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer. +Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This -operation is valid for Redis only. +operation is valid for Redis OSS only. # Arguments - `replication_group_id`: The identifier of the replication group to modify. @@ -3532,9 +3551,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users - with Redis AUTH -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade + with Redis OSS AUTH +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"AutomaticFailoverEnabled"`: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | @@ -3551,18 +3570,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default. - `"ClusterMode"`: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, - you must first set the cluster mode to Compatible. Compatible mode allows your Redis + you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you - migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode - configuration and set the cluster mode to Enabled. + migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster + mode configuration and set the cluster mode to Enabled. - `"EngineVersion"`: The upgraded version of the cache engine to be run on the clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. - `"IpDiscovery"`: The network type you choose when modifying a cluster, either ipv4 | - ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached - engine version 1.6.6 on all instances built on the Nitro system. + ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"MultiAZEnabled"`: A flag to indicate MultiAZ is enabled. - `"NodeGroupId"`: Deprecated. This parameter is not used. @@ -3597,7 +3616,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. - `"SnapshottingClusterId"`: The cluster ID that is used as the daily snapshot source for - the replication group. This parameter cannot be set for Redis (cluster mode enabled) + the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups. - `"TransitEncryptionEnabled"`: A flag that enables in-transit encryption when set to true. If you are enabling in-transit encryption for an existing cluster, you must also set @@ -3606,10 +3625,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all - your Redis clients to use encrypted connections you can set the value to required to allow - encrypted connections only. Setting TransitEncryptionMode to required is a two-step process - that requires you to first set the TransitEncryptionMode to preferred, after that you can - set TransitEncryptionMode to required. + your Redis OSS clients to use encrypted connections you can set the value to required to + allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step + process that requires you to first set the TransitEncryptionMode to preferred, after that + you can set TransitEncryptionMode to required. - `"UserGroupIdsToAdd"`: The ID of the user group you are associating with the replication group. - `"UserGroupIdsToRemove"`: The ID of the user group to disassociate from the replication @@ -3654,19 +3673,19 @@ shards, or rebalance the keyspaces among existing shards. At present, the only permitted value for this parameter is true. 
Value: true - `node_group_count`: The number of node groups (shards) that results from the modification of the shard configuration. -- `replication_group_id`: The name of the Redis (cluster mode enabled) cluster (replication - group) on which the shards are to be configured. +- `replication_group_id`: The name of the Redis OSS (cluster mode enabled) cluster + (replication group) on which the shards are to be configured. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NodeGroupsToRemove"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. - NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for - Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. + NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis + OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. - `"NodeGroupsToRetain"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. - NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache for - Redis will attempt to remove all node groups except those listed by NodeGroupsToRetain from + NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache (Redis + OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. - `"ReshardingConfiguration"`: Specifies the preferred availability zones for each node group in the cluster. If the value of NodeGroupCount is greater than the current number of @@ -3730,23 +3749,25 @@ This API modifies the attributes of a serverless cache. Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"CacheUsageLimits"`: Modify the cache usage limit for the serverless cache. - `"DailySnapshotTime"`: The daily time during which Elasticache begins taking a daily - snapshot of the serverless cache. Available for Redis only. The default is NULL, i.e. the - existing snapshot time configured for the cluster is not removed. + snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. + The default is NULL, i.e. the existing snapshot time configured for the cluster is not + removed. - `"Description"`: User provided description for the serverless cache. Default = NULL, i.e. the existing description is not removed/modified. The description has a maximum length of 255 characters. - `"RemoveUserGroup"`: The identifier of the UserGroup to be removed from association with - the Redis serverless cache. Available for Redis only. Default is NULL. + the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL. - `"SecurityGroupIds"`: The new list of VPC security groups to be associated with the serverless cache. Populating this list means the current VPC security groups will be removed. This security group is used to authorize traffic access for the VPC end-point (private-link). Default = NULL - the existing list of VPC security groups is not removed. - `"SnapshotRetentionLimit"`: The number of days for which Elasticache retains automatic - snapshots before deleting them. Available for Redis only. Default = NULL, i.e. the existing - snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 - days. + snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. + Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. + The maximum value allowed is 35 days. - `"UserGroupId"`: The identifier of the UserGroup to be associated with the serverless - cache. Available for Redis only. 
Default is NULL - the existing UserGroup is not removed. + cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not + removed. """ function modify_serverless_cache( ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() @@ -3856,7 +3877,7 @@ end Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved -Nodes for Redis or Managing Costs with Reserved Nodes for Memcached. +Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached. # Arguments - `reserved_cache_nodes_offering_id`: The ID of the reserved cache node offering to @@ -3961,9 +3982,9 @@ applies any modified cache parameter groups to the cluster. The reboot operation place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster -event is created. Rebooting a cluster is currently supported on Memcached and Redis -(cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode -enabled) clusters. If you make changes to parameters that require a Redis (cluster mode +event is created. Rebooting a cluster is currently supported on Memcached and Redis OSS +(cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode +enabled) clusters. If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. @@ -4176,7 +4197,7 @@ Start the migration of data. # Arguments - `customer_node_endpoint_list`: List of endpoints from which data should be migrated. For - Redis (cluster mode disabled), list should have only one element. 
+ Redis OSS (cluster mode disabled), list should have only one element. - `replication_group_id`: The ID of the replication group to which data should be migrated. """ @@ -4232,7 +4253,7 @@ following A customer can use this operation to test automatic failover on up shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this -operation multiple times on different shards in the same Redis (cluster mode enabled) +operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the diff --git a/src/services/entityresolution.jl b/src/services/entityresolution.jl index 6ad0a84154..fb58e8cb54 100644 --- a/src/services/entityresolution.jl +++ b/src/services/entityresolution.jl @@ -17,7 +17,9 @@ GetPolicy API. - `arn`: The Amazon Resource Name (ARN) of the resource that will be accessed by the principal. - `effect`: Determines whether the permissions specified in the policy are to be allowed - (Allow) or denied (Deny). + (Allow) or denied (Deny). If you set the value of the effect parameter to Deny for the + AddPolicyStatement operation, you must also set the value of the effect parameter in the + policy to Deny for the PutPolicy operation. - `principal`: The Amazon Web Services service or Amazon Web Services account that can access the resource defined as ARN. 
- `statement_id`: A statement identifier that differentiates the statement from others in @@ -116,20 +118,18 @@ function batch_delete_unique_id( end """ - create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) - create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name, params::Dict{String,<:Any}) Creates an IdMappingWorkflow object which stores the configuration of the data processing job to be run. Each IdMappingWorkflow must have a unique workflow name. To modify an existing workflow, use the UpdateIdMappingWorkflow API. # Arguments -- `id_mapping_techniques`: An object which defines the idMappingType and the - providerProperties. +- `id_mapping_techniques`: An object which defines the ID mapping technique and any + additional configurations. - `input_source_config`: A list of InputSource objects, which have the fields InputSourceARN and SchemaName. -- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes - this role to create resources on your behalf as part of workflow execution. - `workflow_name`: The name of the workflow. There can't be multiple IdMappingWorkflows with the same name. @@ -138,12 +138,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A description of the workflow. - `"outputSourceConfig"`: A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. - `"tags"`: The tags used to organize, track, or control access for this resource. 
""" function create_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -153,7 +154,6 @@ function create_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, "workflowName" => workflowName, ); aws_config=aws_config, @@ -163,7 +163,6 @@ end function create_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -177,7 +176,6 @@ function create_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, "workflowName" => workflowName, ), params, @@ -1131,7 +1129,9 @@ Updates the resource-based policy. # Arguments - `arn`: The Amazon Resource Name (ARN) of the resource for which the policy needs to be updated. -- `policy`: The resource-based policy. +- `policy`: The resource-based policy. If you set the value of the effect parameter in the + policy to Deny for the PutPolicy operation, you must also set the value of the effect + parameter to Deny for the AddPolicyStatement operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1314,20 +1314,18 @@ function untag_resource( end """ - update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) - update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + update_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name) + update_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name, params::Dict{String,<:Any}) Updates an existing IdMappingWorkflow. 
This method is identical to CreateIdMappingWorkflow, except it uses an HTTP PUT request instead of a POST request, and the IdMappingWorkflow must already exist for the method to succeed. # Arguments -- `id_mapping_techniques`: An object which defines the idMappingType and the - providerProperties. +- `id_mapping_techniques`: An object which defines the ID mapping technique and any + additional configurations. - `input_source_config`: A list of InputSource objects, which have the fields InputSourceARN and SchemaName. -- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes - this role to access Amazon Web Services resources on your behalf. - `workflow_name`: The name of the workflow. # Optional Parameters @@ -1335,11 +1333,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A description of the workflow. - `"outputSourceConfig"`: A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to access Amazon Web Services resources on your behalf. 
""" function update_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1349,7 +1348,6 @@ function update_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1358,7 +1356,6 @@ end function update_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1372,7 +1369,6 @@ function update_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, ), params, ), diff --git a/src/services/firehose.jl b/src/services/firehose.jl index d50f06233d..81040a0147 100644 --- a/src/services/firehose.jl +++ b/src/services/firehose.jl @@ -77,6 +77,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys only one destination. - `"HttpEndpointDestinationConfiguration"`: Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination. +- `"IcebergDestinationConfiguration"`: Configure Apache Iceberg Tables destination. + Amazon Data Firehose is in preview release and is subject to change. - `"KinesisStreamSourceConfiguration"`: When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream. @@ -744,6 +746,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ExtendedS3DestinationUpdate"`: Describes an update for a destination in Amazon S3. - `"HttpEndpointDestinationUpdate"`: Describes an update to the specified HTTP endpoint destination. 
+- `"IcebergDestinationUpdate"`: Describes an update for a destination in Apache Iceberg + Tables. Amazon Data Firehose is in preview release and is subject to change. - `"RedshiftDestinationUpdate"`: Describes an update for a destination in Amazon Redshift. - `"S3DestinationUpdate"`: [Deprecated] Describes an update for a destination in Amazon S3. - `"SnowflakeDestinationUpdate"`: Update to the Snowflake destination configuration diff --git a/src/services/glue.jl b/src/services/glue.jl index 381075a80b..7aba9bc4ab 100644 --- a/src/services/glue.jl +++ b/src/services/glue.jl @@ -4194,6 +4194,8 @@ Retrieves all databases defined in a given Data Catalog. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributesToGet"`: Specifies the database fields returned by the GetDatabases call. + This parameter doesn’t accept an empty list. The request must include the NAME. - `"CatalogId"`: The ID of the Data Catalog from which to retrieve Databases. If none is provided, the Amazon Web Services account ID is used by default. - `"MaxResults"`: The maximum number of databases to return in one response. diff --git a/src/services/iotsitewise.jl b/src/services/iotsitewise.jl index 69d92c5fbf..81cc8f26d5 100644 --- a/src/services/iotsitewise.jl +++ b/src/services/iotsitewise.jl @@ -559,7 +559,7 @@ reusable component that you can include in the composite models of other asset m can't create assets directly from this type of asset model. # Arguments -- `asset_model_name`: A unique, friendly name for the asset model. +- `asset_model_name`: A unique name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -568,7 +568,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys composite models that model parts of your industrial equipment. 
Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use - CreateAssetModelCompositeModel. For more information, see <LINK>. + CreateAssetModelCompositeModel. For more information, see Creating custom composite models + (Components) in the IoT SiteWise User Guide. - `"assetModelDescription"`: A description for the asset model. - `"assetModelExternalId"`: An external ID to assign to the asset model. The external ID must be unique within your Amazon Web Services account. For more information, see Using @@ -649,7 +650,7 @@ with assetModelType of COMPONENT_MODEL. To create an inline model, specify the assetModelCompositeModelProperties and don't include an composedAssetModelId. # Arguments -- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_composite_model_name`: A unique name for the composite model. - `asset_model_composite_model_type`: The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. - `asset_model_id`: The ID of the asset model this composite model is a part of. @@ -666,12 +667,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique. - `"assetModelCompositeModelProperties"`: The property definitions of the composite model. - For more information, see <LINK>. You can specify up to 200 properties per composite - model. For more information, see Quotas in the IoT SiteWise User Guide. + For more information, see Inline custom composite models in the IoT SiteWise User Guide. + You can specify up to 200 properties per composite model. For more information, see Quotas + in the IoT SiteWise User Guide. 
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. -- `"composedAssetModelId"`: The ID of a composite model on this asset. +- `"composedAssetModelId"`: The ID of a component model which is reused to create this + composite model. - `"parentAssetModelCompositeModelId"`: The ID of the parent composite model in this asset model relationship. """ @@ -875,7 +878,7 @@ from local servers to IoT SiteWise. For more information, see Ingesting data usi gateway in the IoT SiteWise User Guide. # Arguments -- `gateway_name`: A unique, friendly name for the gateway. +- `gateway_name`: A unique name for the gateway. - `gateway_platform`: The gateway's platform. You can only specify one platform in a gateway. @@ -3622,19 +3625,19 @@ end Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User -Guide. This operation overwrites the existing model with the provided model. To avoid -deleting your asset model's properties or hierarchies, you must include their IDs and -definitions in the updated asset model payload. For more information, see -DescribeAssetModel. If you remove a property from an asset model, IoT SiteWise deletes all -previous data for that property. If you remove a hierarchy definition from an asset model, -IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the -type or data type of an existing property. +Guide. If you remove a property from an asset model, IoT SiteWise deletes all previous +data for that property. You can’t change the type or data type of an existing property. 
+To replace an existing asset model property with a new one with the same name, do the +following: Submit an UpdateAssetModel request with the entire existing property removed. + Submit a second UpdateAssetModel request that includes the new property. The new asset +property will have the same name as the previous one and IoT SiteWise will generate a new +unique id. # Arguments - `asset_model_id`: The ID of the asset model to update. This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one. For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide. -- `asset_model_name`: A unique, friendly name for the asset model. +- `asset_model_name`: A unique name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3643,7 +3646,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use - CreateAssetModelCompositeModel. For more information, see <LINK>. + CreateAssetModelCompositeModel. For more information, see Creating custom composite models + (Components) in the IoT SiteWise User Guide. - `"assetModelDescription"`: A description for the asset model. - `"assetModelExternalId"`: An external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web @@ -3714,7 +3718,7 @@ previous one and IoT SiteWise will generate a new unique id. # Arguments - `asset_model_composite_model_id`: The ID of a composite model on this asset model. -- `asset_model_composite_model_name`: A unique, friendly name for the composite model. 
+- `asset_model_composite_model_name`: A unique name for the composite model. - `asset_model_id`: The ID of the asset model, in UUID format. # Optional Parameters @@ -3724,8 +3728,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to the exact same thing as when it was created. - `"assetModelCompositeModelProperties"`: The property definitions of the composite model. - For more information, see <LINK>. You can specify up to 200 properties per composite - model. For more information, see Quotas in the IoT SiteWise User Guide. + For more information, see Inline custom composite models in the IoT SiteWise User Guide. + You can specify up to 200 properties per composite model. For more information, see Quotas + in the IoT SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -3906,7 +3911,7 @@ Updates a gateway's name. # Arguments - `gateway_id`: The ID of the gateway to update. -- `gateway_name`: A unique, friendly name for the gateway. +- `gateway_name`: A unique name for the gateway. """ function update_gateway( diff --git a/src/services/ivs_realtime.jl b/src/services/ivs_realtime.jl index 8785c3e1d2..c298347d4e 100644 --- a/src/services/ivs_realtime.jl +++ b/src/services/ivs_realtime.jl @@ -100,8 +100,8 @@ Creates a new stage (and optionally participant tokens). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto participant recording configuration - object attached to the stage. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the new stage. 
- `"name"`: Optional name that can be specified for the stage being created. - `"participantTokenConfigurations"`: Array of participant token configuration objects to attach to the new stage. @@ -203,6 +203,38 @@ function delete_encoder_configuration( ) end +""" + delete_public_key(arn) + delete_public_key(arn, params::Dict{String,<:Any}) + +Deletes the specified public key used to sign stage participant tokens. This invalidates +future participant tokens generated using the key pair’s private key. + +# Arguments +- `arn`: ARN of the public key to be deleted. + +""" +function delete_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stage(arn) delete_stage(arn, params::Dict{String,<:Any}) @@ -434,6 +466,37 @@ function get_participant( ) end +""" + get_public_key(arn) + get_public_key(arn, params::Dict{String,<:Any}) + +Gets information for the specified public key. + +# Arguments +- `arn`: ARN of the public key for which the information is to be retrieved. 
+ +""" +function get_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_stage(arn) get_stage(arn, params::Dict{String,<:Any}) @@ -539,6 +602,52 @@ function get_storage_configuration( ) end +""" + import_public_key(public_key_material) + import_public_key(public_key_material, params::Dict{String,<:Any}) + +Import a public key to be used for signing stage participant tokens. + +# Arguments +- `public_key_material`: The content of the public key to be imported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"name"`: Name of the public key to be imported. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. 
+""" +function import_public_key( + publicKeyMaterial; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_public_key( + publicKeyMaterial, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_compositions() list_compositions(params::Dict{String,<:Any}) @@ -725,6 +834,36 @@ function list_participants( ) end +""" + list_public_keys() + list_public_keys(params::Dict{String,<:Any}) + +Gets summary information about all public keys in your account, in the AWS region where the +API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of results to return. Default: 50. +- `"nextToken"`: The first public key to retrieve. This is used for pagination; see the + nextToken response field. +""" +function list_public_keys(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", "/ListPublicKeys"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_public_keys( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListPublicKeys", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stage_sessions(stage_arn) list_stage_sessions(stage_arn, params::Dict{String,<:Any}) @@ -1049,9 +1188,9 @@ Updates a stage’s configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto-participant-recording configuration - object to attach to the stage. Auto-participant-recording configuration cannot be updated - while recording is active. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the stage. Note that this cannot be updated while + recording is active. - `"name"`: Name of the stage to be updated. """ function update_stage(arn; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/kinesis_analytics_v2.jl b/src/services/kinesis_analytics_v2.jl index d8f45ba1f9..832d11d6c1 100644 --- a/src/services/kinesis_analytics_v2.jl +++ b/src/services/kinesis_analytics_v2.jl @@ -1012,6 +1012,52 @@ function describe_application( ) end +""" + describe_application_operation(application_name, operation_id) + describe_application_operation(application_name, operation_id, params::Dict{String,<:Any}) + +Returns information about a specific operation performed on a Managed Service for Apache +Flink application + +# Arguments +- `application_name`: +- `operation_id`: + +""" +function describe_application_operation( + ApplicationName, OperationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_operation( + ApplicationName, + OperationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ 
describe_application_snapshot(application_name, snapshot_name) describe_application_snapshot(application_name, snapshot_name, params::Dict{String,<:Any}) @@ -1164,6 +1210,50 @@ function discover_input_schema( ) end +""" + list_application_operations(application_name) + list_application_operations(application_name, params::Dict{String,<:Any}) + +Lists information about operations performed on a Managed Service for Apache Flink +application + +# Arguments +- `application_name`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: +- `"NextToken"`: +- `"Operation"`: +- `"OperationStatus"`: +""" +function list_application_operations( + ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}("ApplicationName" => ApplicationName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_operations( + ApplicationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ApplicationName" => ApplicationName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_snapshots(application_name) list_application_snapshots(application_name, params::Dict{String,<:Any}) @@ -1324,11 +1414,10 @@ end rollback_application(application_name, current_application_version_id, params::Dict{String,<:Any}) Reverts the application to the previous running version. You can roll back an application -if you suspect it is stuck in a transient status. You can roll back an application only if -it is in the UPDATING or AUTOSCALING status. When you rollback an application, it loads -state data from the last successful snapshot. 
If the application has no snapshots, Managed -Service for Apache Flink rejects the rollback request. This action is not supported for -Managed Service for Apache Flink for SQL applications. +if you suspect it is stuck in a transient status or in the running status. You can roll +back an application only if it is in the UPDATING, AUTOSCALING, or RUNNING statuses. When +you rollback an application, it loads state data from the last successful snapshot. If the +application has no snapshots, Managed Service for Apache Flink rejects the rollback request. # Arguments - `application_name`: The name of the application. diff --git a/src/services/lex_models_v2.jl b/src/services/lex_models_v2.jl index 77bcf483cc..a57e3d838d 100644 --- a/src/services/lex_models_v2.jl +++ b/src/services/lex_models_v2.jl @@ -745,7 +745,8 @@ end Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created. You can't create a resource policy statement that allows cross-account -access. +access. You need to add the CreateResourcePolicy or UpdateResourcePolicy action to the bot +role in order to call the API. # Arguments - `action`: The Amazon Lex action that this policy either allows or denies. The action must @@ -1434,7 +1435,8 @@ end Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an -exception. +exception. You need to add the DeleteResourcePolicy or UpdateResourcePolicy action to the +bot role in order to call the API. 
# Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the bot or bot alias that the resource diff --git a/src/services/license_manager_linux_subscriptions.jl b/src/services/license_manager_linux_subscriptions.jl index f9c028f39e..7505e96c4f 100644 --- a/src/services/license_manager_linux_subscriptions.jl +++ b/src/services/license_manager_linux_subscriptions.jl @@ -4,11 +4,97 @@ using AWS.AWSServices: license_manager_linux_subscriptions using AWS.Compat using AWS.UUIDs +""" + deregister_subscription_provider(subscription_provider_arn) + deregister_subscription_provider(subscription_provider_arn, params::Dict{String,<:Any}) + +Remove a third-party subscription provider from the Bring Your Own License (BYOL) +subscriptions registered to your account. + +# Arguments +- `subscription_provider_arn`: The Amazon Resource Name (ARN) of the subscription provider + resource to deregister. + +""" +function deregister_subscription_provider( + SubscriptionProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/DeregisterSubscriptionProvider", + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deregister_subscription_provider( + SubscriptionProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/DeregisterSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_registered_subscription_provider(subscription_provider_arn) + get_registered_subscription_provider(subscription_provider_arn, params::Dict{String,<:Any}) + +Get details for a Bring Your Own License (BYOL) subscription that's 
registered to your +account. + +# Arguments +- `subscription_provider_arn`: The Amazon Resource Name (ARN) of the BYOL registration + resource to get details for. + +""" +function get_registered_subscription_provider( + SubscriptionProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetRegisteredSubscriptionProvider", + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_registered_subscription_provider( + SubscriptionProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetRegisteredSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_service_settings() get_service_settings(params::Dict{String,<:Any}) -Lists the Linux subscriptions service settings. +Lists the Linux subscriptions service settings for your account. """ function get_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -40,15 +126,17 @@ subscriptions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: An array of structures that you can use to filter the results to those that - match one or more sets of key-value pairs that you specify. For example, you can filter by - the name of AmiID with an optional operator to see subscriptions that match, partially - match, or don't match a certain Amazon Machine Image (AMI) ID. 
The valid names for this - filter are: AmiID InstanceID AccountID Status Region UsageOperation - ProductCode InstanceType The valid Operators for this filter are: contains - equals Notequal -- `"MaxResults"`: Maximum number of results to return in a single call. -- `"NextToken"`: Token for the next set of results. +- `"Filters"`: An array of structures that you can use to filter the results by your + specified criteria. For example, you can specify Region in the Name, with the contains + operator to list all subscriptions that match a partial string in the Value, such as + us-west. For each filter, you can specify one of the following values for the Name key to + streamline results: AccountID AmiID DualSubscription InstanceID + InstanceType ProductCode Region Status UsageOperation For each filter, + you can use one of the following Operator values to define the behavior of the filter: + contains equals Notequal +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. """ function list_linux_subscription_instances(; aws_config::AbstractAWSConfig=global_aws_config() @@ -88,8 +176,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys partially match, or don't match a certain subscription's name. The valid names for this filter are: Subscription The valid Operators for this filter are: contains equals Notequal -- `"MaxResults"`: Maximum number of results to return in a single call. -- `"NextToken"`: Token for the next set of results. +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. 
""" function list_linux_subscriptions(; aws_config::AbstractAWSConfig=global_aws_config()) return license_manager_linux_subscriptions( @@ -111,6 +200,208 @@ function list_linux_subscriptions( ) end +""" + list_registered_subscription_providers() + list_registered_subscription_providers(params::Dict{String,<:Any}) + +List Bring Your Own License (BYOL) subscription registration resources for your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +- `"SubscriptionProviderSources"`: To filter your results, specify which subscription + providers to return in the list. +""" +function list_registered_subscription_providers(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListRegisteredSubscriptionProviders"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_registered_subscription_providers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListRegisteredSubscriptionProviders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List the metadata tags that are assigned to the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which to list metadata + tags. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + register_subscription_provider(secret_arn, subscription_provider_source) + register_subscription_provider(secret_arn, subscription_provider_source, params::Dict{String,<:Any}) + +Register the supported third-party subscription provider for your Bring Your Own License +(BYOL) subscription. + +# Arguments +- `secret_arn`: The Amazon Resource Name (ARN) of the secret where you've stored your + subscription provider's access token. For RHEL subscriptions managed through the Red Hat + Subscription Manager (RHSM), the secret contains your Red Hat Offline token. +- `subscription_provider_source`: The supported Linux subscription provider to register. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The metadata tags to assign to your registered Linux subscription provider + resource. 
+""" +function register_subscription_provider( + SecretArn, SubscriptionProviderSource; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/RegisterSubscriptionProvider", + Dict{String,Any}( + "SecretArn" => SecretArn, + "SubscriptionProviderSource" => SubscriptionProviderSource, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function register_subscription_provider( + SecretArn, + SubscriptionProviderSource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/RegisterSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SecretArn" => SecretArn, + "SubscriptionProviderSource" => SubscriptionProviderSource, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Add metadata tags to the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services resource to + which to add the specified metadata tags. +- `tags`: The metadata tags to assign to the Amazon Web Services resource. Tags are + formatted as key value pairs. 
+ +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return license_manager_linux_subscriptions( + "PUT", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "PUT", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Remove one or more metadata tag from the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services resource to + remove the metadata tags from. +- `tag_keys`: A list of metadata tag keys to remove from the requested resource. 
+ +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings) update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings, params::Dict{String,<:Any}) diff --git a/src/services/mediaconnect.jl b/src/services/mediaconnect.jl index f286958480..074bd5329e 100644 --- a/src/services/mediaconnect.jl +++ b/src/services/mediaconnect.jl @@ -1861,6 +1861,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency. +- `"outputStatus"`: An indication of whether the output should transmit data or not. If you + don't specify the outputStatus field in your request, MediaConnect leaves the value + unchanged. - `"port"`: The port to use when content is distributed to this output. - `"protocol"`: The protocol to use for the output. - `"remoteId"`: The remote ID for the Zixi-pull stream. diff --git a/src/services/medialive.jl b/src/services/medialive.jl index 6a03c9a9a0..2e3694ab5a 100644 --- a/src/services/medialive.jl +++ b/src/services/medialive.jl @@ -543,6 +543,7 @@ exactly two source URLs for redundancy. 
Only specify sources for PULL type Inputs. Leave Destinations empty. +- `"srtSettings"`: The settings associated with an SRT input. - `"tags"`: A collection of key-value pairs. - `"type"`: - `"vpc"`: @@ -3294,6 +3295,7 @@ exactly two source URLs for redundancy. Only specify sources for PULL type Inputs. Leave Destinations empty. +- `"srtSettings"`: The settings associated with an SRT input. """ function update_input(inputId; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( diff --git a/src/services/medical_imaging.jl b/src/services/medical_imaging.jl index 7467de1fc8..fb35fb86a0 100644 --- a/src/services/medical_imaging.jl +++ b/src/services/medical_imaging.jl @@ -15,6 +15,11 @@ Copy an image set. - `datastore_id`: The data store identifier. - `source_image_set_id`: The source image set identifier. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Setting this flag will force the CopyImageSet operation, even if Patient, + Study, or Series level metadata are mismatched across the sourceImageSet and + destinationImageSet. """ function copy_image_set( copyImageSetInformation, @@ -705,6 +710,12 @@ Update image set metadata attributes. - `latest_version`: The latest image set version identifier. - `update_image_set_metadata_updates`: Update image set metadata updates. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"force"`: Setting this flag will force the UpdateImageSetMetadata operation for the + following attributes: Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, + and Tag.StudyID Adding, removing, or updating private tags for an individual SOP + Instance """ function update_image_set_metadata( datastoreId, diff --git a/src/services/memorydb.jl b/src/services/memorydb.jl index a2874c1158..c3d5532a81 100644 --- a/src/services/memorydb.jl +++ b/src/services/memorydb.jl @@ -158,7 +158,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the r6gd node type. This parameter must be set when using r6gd nodes. For more information, see Data tiering. - `"Description"`: An optional description of the cluster. -- `"EngineVersion"`: The version number of the Redis engine to be used for the cluster. +- `"EngineVersion"`: The version number of the Redis OSS engine to be used for the cluster. - `"KmsKeyId"`: The ID of the KMS key used to encrypt the cluster. - `"MaintenanceWindow"`: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H @@ -473,7 +473,9 @@ end delete_cluster(cluster_name) delete_cluster(cluster_name, params::Dict{String,<:Any}) -Deletes a cluster. It also deletes all associated nodes and node endpoints +Deletes a cluster. It also deletes all associated nodes and node endpoints CreateSnapshot +permission is required to create a final snapshot. Without this permission, the API call +will fail with an Access Denied exception. # Arguments - `cluster_name`: The name of the cluster to be deleted @@ -718,13 +720,13 @@ end describe_engine_versions() describe_engine_versions(params::Dict{String,<:Any}) -Returns a list of the available Redis engine versions. +Returns a list of the available Redis OSS engine versions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"DefaultOnly"`: If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. -- `"EngineVersion"`: The Redis engine version +- `"EngineVersion"`: The Redis OSS engine version - `"MaxResults"`: The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. diff --git a/src/services/mobile.jl b/src/services/mobile.jl deleted file mode 100644 index d348dc1d08..0000000000 --- a/src/services/mobile.jl +++ /dev/null @@ -1,299 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: mobile -using AWS.Compat -using AWS.UUIDs - -""" - create_project() - create_project(params::Dict{String,<:Any}) - - Creates an AWS Mobile Hub project. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"contents"`: ZIP or YAML file which contains configuration settings to be used when - creating the project. This may be the contents of the file downloaded from the URL provided - in an export project operation. -- `"name"`: Name of the project. -- `"region"`: Default region where project resources should be created. -- `"snapshotId"`: Unique identifier for an exported snapshot of project configuration. - This snapshot identifier is included in the share URL when a project is exported. 
-""" -function create_project(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", "/projects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function create_project( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "POST", "/projects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - delete_project(project_id) - delete_project(project_id, params::Dict{String,<:Any}) - - Delets a project in AWS Mobile Hub. - -# Arguments -- `project_id`: Unique project identifier. - -""" -function delete_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "DELETE", - "/projects/$(projectId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "DELETE", - "/projects/$(projectId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_bundle(bundle_id) - describe_bundle(bundle_id, params::Dict{String,<:Any}) - - Get the bundle details for the requested bundle id. - -# Arguments -- `bundle_id`: Unique bundle identifier. - -""" -function describe_bundle(bundleId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", - "/bundles/$(bundleId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_bundle( - bundleId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "GET", - "/bundles/$(bundleId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_project(project_id) - describe_project(project_id, params::Dict{String,<:Any}) - - Gets details about a project in AWS Mobile Hub. - -# Arguments -- `project_id`: Unique project identifier. 
- -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"syncFromResources"`: If set to true, causes AWS Mobile Hub to synchronize information - from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub - project. -""" -function describe_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", - "/project", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "GET", - "/project", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_bundle(bundle_id) - export_bundle(bundle_id, params::Dict{String,<:Any}) - - Generates customized software development kit (SDK) and or tool packages used to integrate -mobile web or mobile app clients with backend AWS resources. - -# Arguments -- `bundle_id`: Unique bundle identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"platform"`: Developer desktop or target application platform. -- `"projectId"`: Unique project identifier. 
-""" -function export_bundle(bundleId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/bundles/$(bundleId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_bundle( - bundleId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/bundles/$(bundleId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_project(project_id) - export_project(project_id, params::Dict{String,<:Any}) - - Exports project configuration to a snapshot which can be downloaded and shared. Note that -mobile app push credentials are encrypted in exported projects, so they can only be shared -successfully within the same AWS account. - -# Arguments -- `project_id`: Unique project identifier. - -""" -function export_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/exports/$(projectId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/exports/$(projectId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_bundles() - list_bundles(params::Dict{String,<:Any}) - - List all available bundles. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum number of records to list in a single response. -- `"nextToken"`: Pagination token. Set to null to start listing bundles from start. If - non-null pagination token is returned in a result, then pass its value in here in another - request to list more bundles. 
-""" -function list_bundles(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile("GET", "/bundles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) -end -function list_bundles( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "GET", "/bundles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_projects() - list_projects(params::Dict{String,<:Any}) - - Lists projects in AWS Mobile Hub. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum number of records to list in a single response. -- `"nextToken"`: Pagination token. Set to null to start listing projects from start. If - non-null pagination token is returned in a result, then pass its value in here in another - request to list more projects. -""" -function list_projects(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", "/projects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_projects( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "GET", "/projects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_project(project_id) - update_project(project_id, params::Dict{String,<:Any}) - - Update an existing project. - -# Arguments -- `project_id`: Unique project identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"contents"`: ZIP or YAML file which contains project configuration to be updated. This - should be the contents of the file downloaded from the URL provided in an export project - operation. 
-""" -function update_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/update", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/update", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/mq.jl b/src/services/mq.jl index 69f6b4e24a..8d8ef4ffb5 100644 --- a/src/services/mq.jl +++ b/src/services/mq.jl @@ -5,8 +5,8 @@ using AWS.Compat using AWS.UUIDs """ - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users) - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) Creates a broker. Note: This API is asynchronous. To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM @@ -21,10 +21,6 @@ Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ El Network Interface in the Amazon MQ Developer Guide. # Arguments -- `auto_minor_version_upgrade`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. 
Set - to true by default, if no value is specified. - `broker_name`: Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special @@ -35,8 +31,6 @@ Network Interface in the Amazon MQ Developer Guide. - `deployment_mode`: Required. The broker's deployment mode. - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `host_instance_type`: Required. The broker's instance type. - `publicly_accessible`: Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided. @@ -49,6 +43,11 @@ Network Interface in the Amazon MQ Developer Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Set to true by default, + if no value is specified. Must be set to true for ActiveMQ brokers version 5.18 and above + and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. - `"creatorRequestId"`: The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. We recommend using a Universally Unique @@ -59,6 +58,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR. - `"encryptionOptions"`: Encryption options for the broker. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers. - `"logs"`: Enables Amazon CloudWatch logging for brokers. @@ -81,11 +83,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tags"`: Create tags when creating the broker. """ function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users; @@ -95,11 +95,9 @@ function create_broker( "POST", "/v1/brokers", Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -110,11 +108,9 @@ function create_broker( ) end function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users, @@ -128,11 +124,9 @@ function create_broker( mergewith( _merge, Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -147,8 +141,8 @@ function 
create_broker( end """ - create_configuration(engine_type, engine_version, name) - create_configuration(engine_type, engine_version, name, params::Dict{String,<:Any}) + create_configuration(engine_type, name) + create_configuration(engine_type, name, params::Dict{String,<:Any}) Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). @@ -156,8 +150,6 @@ default configuration (the engine type and version). # Arguments - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `name`: Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long. @@ -166,24 +158,24 @@ default configuration (the engine type and version). Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy associated with the configuration. The default is SIMPLE. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"tags"`: Create tags when creating the configuration. 
""" function create_configuration( - engineType, engineVersion, name; aws_config::AbstractAWSConfig=global_aws_config() + engineType, name; aws_config::AbstractAWSConfig=global_aws_config() ) return mq( "POST", "/v1/configurations", - Dict{String,Any}( - "engineType" => engineType, "engineVersion" => engineVersion, "name" => name - ); + Dict{String,Any}("engineType" => engineType, "name" => name); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_configuration( engineType, - engineVersion, name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -193,13 +185,7 @@ function create_configuration( "/v1/configurations", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "engineType" => engineType, - "engineVersion" => engineVersion, - "name" => name, - ), - params, + _merge, Dict{String,Any}("engineType" => engineType, "name" => name), params ), ); aws_config=aws_config, @@ -861,13 +847,16 @@ Adds a pending configuration change to a broker. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. -- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Must be set to true for + ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. 
- `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. -- `"engineVersion"`: The broker engine version. For a list of supported engine versions, - see Supported engines. +- `"engineVersion"`: The broker engine version. For more information, see the ActiveMQ + version management and the RabbitMQ version management sections in the Amazon MQ Developer + Guide. When upgrading to ActiveMQ version 5.18 and above or RabbitMQ version 3.13 and + above, you must have autoMinorVersionUpgrade set to true for the broker. - `"hostInstanceType"`: The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate diff --git a/src/services/network_firewall.jl b/src/services/network_firewall.jl index 332460ed1e..bdfd2e3c2e 100644 --- a/src/services/network_firewall.jl +++ b/src/services/network_firewall.jl @@ -381,17 +381,21 @@ end create_tlsinspection_configuration(tlsinspection_configuration, tlsinspection_configuration_name) create_tlsinspection_configuration(tlsinspection_configuration, tlsinspection_configuration_name, params::Dict{String,<:Any}) -Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration -contains Certificate Manager certificate associations between and the scope configurations -that Network Firewall uses to decrypt and re-encrypt traffic traveling through your -firewall. After you create a TLS inspection configuration, you can associate it with a new -firewall policy. To update the settings for a TLS inspection configuration, use -UpdateTLSInspectionConfiguration. To manage a TLS inspection configuration's tags, use the -standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, -and UntagResource. 
To retrieve information about TLS inspection configurations, use -ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration. For more -information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS -inspection configurations in the Network Firewall Developer Guide. +Creates an Network Firewall TLS inspection configuration. Network Firewall uses TLS +inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. +After decryption, Network Firewall inspects the traffic according to your firewall policy's +stateful rules, and then re-encrypts it before sending it to its destination. You can +enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS +inspection with your firewall, you must first import or provision certificates using ACM, +create a TLS inspection configuration, add that configuration to a new firewall policy, and +then associate that policy with your firewall. To update the settings for a TLS inspection +configuration, use UpdateTLSInspectionConfiguration. To manage a TLS inspection +configuration's tags, use the standard Amazon Web Services resource tagging operations, +ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS +inspection configurations, use ListTLSInspectionConfigurations and +DescribeTLSInspectionConfiguration. For more information about TLS inspection +configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the +Network Firewall Developer Guide. # Arguments - `tlsinspection_configuration`: The object that defines a TLS inspection configuration. diff --git a/src/services/opensearch.jl b/src/services/opensearch.jl index c45c383def..282f8ec461 100644 --- a/src/services/opensearch.jl +++ b/src/services/opensearch.jl @@ -297,6 +297,7 @@ managing Amazon OpenSearch Service domains. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AIMLOptions"`: Options for all machine learning features for the specified domain. - `"AccessPolicies"`: Identity and Access Management (IAM) policy document specifying the access policies for the new domain. - `"AdvancedOptions"`: Key-value pairs to specify advanced configuration options. The @@ -2355,6 +2356,7 @@ Modifies the cluster configuration of the specified Amazon OpenSearch Service do # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AIMLOptions"`: Options for all machine learning features for the specified domain. - `"AccessPolicies"`: Identity and Access Management (IAM) access policy as a JSON-formatted string. - `"AdvancedOptions"`: Key-value pairs to specify advanced configuration options. The diff --git a/src/services/organizations.jl b/src/services/organizations.jl index ffb0df1a7b..83614b04b2 100644 --- a/src/services/organizations.jl +++ b/src/services/organizations.jl @@ -233,24 +233,23 @@ from the organization's management account. For more information about creating see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such -as a payment method and signing the end user license agreement (EULA) is not automatically -collected. If you must remove an account from your organization later, you can do so only -after you provide the missing information. For more information, see Considerations before -removing an account from an organization in the Organizations User Guide. If you get an -exception that indicates that you exceeded your account limits for the organization, -contact Amazon Web Services Support. If you get an exception that indicates that the -operation failed because your organization is still initializing, wait one hour and then -try again. 
If the error persists, contact Amazon Web Services Support. Using -CreateAccount to create multiple temporary accounts isn't recommended. You can only close -an account from the Billing and Cost Management console, and you must be signed in as the -root user. For information on the requirements and process for closing an account, see -Closing a member account in your organization in the Organizations User Guide. When you -create a member account with this operation, you can choose whether to create the account -with the IAM User and Role Access to Billing Information switch enabled. If you enable it, -IAM users and roles that have appropriate permissions can view billing information for the -account. If you disable it, only the account root user can access billing information. For -information about how to disable this switch for an account, see Granting access to your -billing information and tools. +as a payment method is not automatically collected. If you must remove an account from your +organization later, you can do so only after you provide the missing information. For more +information, see Considerations before removing an account from an organization in the +Organizations User Guide. If you get an exception that indicates that you exceeded your +account limits for the organization, contact Amazon Web Services Support. If you get an +exception that indicates that the operation failed because your organization is still +initializing, wait one hour and then try again. If the error persists, contact Amazon Web +Services Support. Using CreateAccount to create multiple temporary accounts isn't +recommended. You can only close an account from the Billing and Cost Management console, +and you must be signed in as the root user. For information on the requirements and process +for closing an account, see Closing a member account in your organization in the +Organizations User Guide. 
When you create a member account with this operation, you can +choose whether to create the account with the IAM User and Role Access to Billing +Information switch enabled. If you enable it, IAM users and roles that have appropriate +permissions can view billing information for the account. If you disable it, only the +account root user can access billing information. For information about how to disable this +switch for an account, see Granting access to your billing information and tools. # Arguments - `account_name`: The friendly name of the member account. diff --git a/src/services/payment_cryptography_data.jl b/src/services/payment_cryptography_data.jl index cd9714f20c..08697fd465 100644 --- a/src/services/payment_cryptography_data.jl +++ b/src/services/payment_cryptography_data.jl @@ -32,8 +32,14 @@ operations: EncryptData GetPublicCertificate ImportKey - `cipher_text`: The ciphertext to decrypt. - `decryption_attributes`: The encryption key type and attributes for ciphertext decryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for ciphertext decryption. + Cryptography uses for ciphertext decryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for ciphertext + decryption. """ function decrypt_data( CipherText, @@ -106,12 +112,18 @@ ImportKey ReEncryptData # Arguments - `encryption_attributes`: The encryption key type and attributes for plaintext encryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for plaintext encryption. + Cryptography uses for plaintext encryption. 
When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. - `plain_text`: The plaintext to be encrypted. For encryption using asymmetric keys, plaintext data length is constrained by encryption key strength that you define in KeyAlgorithm and padding type that you define in AsymmetricEncryptionAttributes. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for plaintext + encryption. """ function encrypt_data( EncryptionAttributes, @@ -396,32 +408,37 @@ end re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier) re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier, params::Dict{String,<:Any}) -Re-encrypt ciphertext using DUKPT, Symmetric and Asymmetric Data Encryption Keys. You can -either generate an encryption key within Amazon Web Services Payment Cryptography by -calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for -use with this operation must be in a compatible key state with KeyModesOfUse set to -Encrypt. In asymmetric encryption, ciphertext is encrypted using public component (imported -by calling ImportKey) of the asymmetric key pair created outside of Amazon Web Services -Payment Cryptography. For symmetric and DUKPT encryption, Amazon Web Services Payment -Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web -Services Payment Cryptography supports RSA. 
To encrypt using DUKPT, a DUKPT key must -already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be -generated by calling CreateKey. For information about valid keys for this operation, see -Understanding key attributes and Key types for specific data operations in the Amazon Web -Services Payment Cryptography User Guide. Cross-account use: This operation can't be used -across different Amazon Web Services accounts. Related operations: DecryptData -EncryptData GetPublicCertificate ImportKey +Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys. You can either +generate an encryption key within Amazon Web Services Payment Cryptography by calling +CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with +this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. For +symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and +AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account +with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey. +For information about valid keys for this operation, see Understanding key attributes and +Key types for specific data operations in the Amazon Web Services Payment Cryptography User +Guide. Cross-account use: This operation can't be used across different Amazon Web +Services accounts. Related operations: DecryptData EncryptData +GetPublicCertificate ImportKey # Arguments - `cipher_text`: Ciphertext to be encrypted. The minimum allowed length is 16 bytes and maximum allowed length is 4096 bytes. - `incoming_encryption_attributes`: The attributes and values for incoming ciphertext. - `incoming_key_identifier`: The keyARN of the encryption key of incoming ciphertext data. + When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping + key. 
Otherwise, it is the key identifier used to perform the operation. - `outgoing_encryption_attributes`: The attributes and values for outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key of incoming + ciphertext data. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key of outgoing + ciphertext data after encryption by Amazon Web Services Payment Cryptography. """ function re_encrypt_data( CipherText, @@ -500,7 +517,9 @@ operations: GeneratePinData VerifyPinData - `encrypted_pin_block`: The encrypted PIN block data that Amazon Web Services Payment Cryptography translates. - `incoming_key_identifier`: The keyARN of the encryption key under which incoming PIN - block data is encrypted. This key type can be PEK or BDK. + block data is encrypted. This key type can be PEK or BDK. When a WrappedKeyBlock is + provided, this value will be the identifier to the key wrapping key for PIN block. + Otherwise, it is the key identifier used to perform the operation. - `incoming_translation_attributes`: The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key for encrypting outgoing PIN @@ -512,8 +531,12 @@ operations: GeneratePinData VerifyPinData Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IncomingDukptAttributes"`: The attributes and values to use for incoming DUKPT encryption key for PIN block translation. +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key under which + incoming PIN block data is encrypted. 
- `"OutgoingDukptAttributes"`: The attributes and values to use for outgoing DUKPT encryption key after PIN block translation. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key for encrypting + outgoing PIN block data. """ function translate_pin_data( EncryptedPinBlock, diff --git a/src/services/pi.jl b/src/services/pi.jl index 8d1ac9c6e8..5941314693 100644 --- a/src/services/pi.jl +++ b/src/services/pi.jl @@ -173,7 +173,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"AdditionalMetrics\" : { \"string\" : \"string\" }. - `"Filter"`: One or more filters to apply in the request. Restrictions: Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters. A - single filter for any other dimension in this dimension group. + single filter for any other dimension in this dimension group. The db.sql.db_id filter + isn't available for RDS for SQL Server DB instances. - `"MaxResults"`: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved. diff --git a/src/services/pinpoint_sms_voice_v2.jl b/src/services/pinpoint_sms_voice_v2.jl index f9c6893414..0ce86979f2 100644 --- a/src/services/pinpoint_sms_voice_v2.jl +++ b/src/services/pinpoint_sms_voice_v2.jl @@ -184,12 +184,12 @@ end create_event_destination(configuration_set_name, event_destination_name, matching_event_types, params::Dict{String,<:Any}) Creates a new event destination in a configuration set. An event destination is a location -where you send message events. The event options are Amazon CloudWatch, Amazon Kinesis Data +where you send message events. The event options are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. 
For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a -single destination, such as a CloudWatch or Kinesis Data Firehose destination. +single destination, such as a CloudWatch or Firehose destination. # Arguments - `configuration_set_name`: Either the name of the configuration set or the configuration @@ -197,8 +197,8 @@ single destination, such as a CloudWatch or Kinesis Data Firehose destination. found using the DescribeConfigurationSets action. - `event_destination_name`: The name that identifies the event destination. - `matching_event_types`: An array of event types that determine which events to log. If - \"ALL\" is used, then Amazon Pinpoint logs every event type. The TEXT_SENT event type is - not supported. + \"ALL\" is used, then AWS End User Messaging SMS and Voice logs every event type. The + TEXT_SENT event type is not supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -208,7 +208,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CloudWatchLogsDestination"`: An object that contains information about an event destination for logging to Amazon CloudWatch Logs. - `"KinesisFirehoseDestination"`: An object that contains information about an event - destination for logging to Amazon Kinesis Data Firehose. + destination for logging to Amazon Data Firehose. - `"SnsDestination"`: An object that contains information about an event destination for logging to Amazon SNS. """ @@ -265,7 +265,7 @@ An opt-out list is a list of phone numbers that are opted out, meaning you can't or voice messages to them. 
If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out -keywords, see SMS opt out in the Amazon Pinpoint User Guide. +keywords, see SMS opt out in the AWS End User Messaging SMS User Guide. # Arguments - `opt_out_list_name`: The name of the new OptOutList. @@ -328,11 +328,12 @@ be associated with multiple pools. country or region of the new pool. - `message_type`: The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or - time-sensitive. + time-sensitive. After the pool is created the MessageType can't be changed. - `origination_identity`: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the - values for SenderId and SenderIdArn. + values for SenderId and SenderIdArn. After the pool is created you can add more origination + identities to the pool by using AssociateOriginationIdentity. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -871,8 +872,8 @@ Deletes an existing keyword from an origination phone number or pool. A keyword that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins -with a keyword, Amazon Pinpoint responds with a customizable message. Keywords \"HELP\" and -\"STOP\" can't be deleted or modified. +with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. 
+Keywords \"HELP\" and \"STOP\" can't be deleted or modified. # Arguments - `keyword`: The keyword to delete. @@ -1230,8 +1231,8 @@ end Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is -controlled by Amazon Web Services. For more information on spend limits (quotas) see Amazon -Pinpoint quotas in the Amazon Pinpoint Developer Guide. +controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas + in the AWS End User Messaging SMS User Guide. """ function delete_text_message_spend_limit_override(; @@ -1302,8 +1303,8 @@ end Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by -Amazon Web Services. For more information on spending limits (quotas) see Amazon Pinpoint -quotas in the Amazon Pinpoint Developer Guide. +Amazon Web Services. For more information on spending limits (quotas) see Quotas in the +AWS End User Messaging SMS User Guide. """ function delete_voice_message_spend_limit_override(; @@ -1333,9 +1334,9 @@ end Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon -Web Services Support case for a service limit increase request. New Amazon Pinpoint -accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web -Services end recipients and SMS or voice recipients from fraud and abuse. +Web Services Support case for a service limit increase request. New accounts are placed +into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients +and SMS or voice recipients from fraud and abuse. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1363,12 +1364,12 @@ end describe_account_limits() describe_account_limits(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your account. The -description for a quota includes the quota name, current usage toward that quota, and the -quota's maximum value. When you establish an Amazon Web Services account, the account has -initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, -and pools that you can create in a given Region. For more information see Amazon Pinpoint -quotas in the Amazon Pinpoint Developer Guide. +Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for +your account. The description for a quota includes the quota name, current usage toward +that quota, and the quota's maximum value. When you establish an Amazon Web Services +account, the account has initial quotas on the maximum number of configuration sets, +opt-out lists, phone numbers, and pools that you can create in a given Region. For more +information see Quotas in the AWS End User Messaging SMS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1436,8 +1437,8 @@ Describes the specified keywords or all keywords on your origination phone numbe A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a -message that begins with a keyword, Amazon Pinpoint responds with a customizable message. -If you specify a keyword that isn't valid, an error is returned. +message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a +customizable message. 
If you specify a keyword that isn't valid, an error is returned. # Arguments - `origination_identity`: The origination identity to use such as a PhoneNumberId, @@ -1987,11 +1988,11 @@ end describe_spend_limits() describe_spend_limits(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint monthly spend limits for sending voice and text -messages. When you establish an Amazon Web Services account, the account has initial -monthly spend limit in a given Region. For more information on increasing your monthly -spend limit, see Requesting increases to your monthly SMS spending quota for Amazon -Pinpoint in the Amazon Pinpoint User Guide. +Describes the current monthly spend limits for sending voice and text messages. When you +establish an Amazon Web Services account, the account has initial monthly spend limit in a +given Region. For more information on increasing your monthly spend limit, see Requesting +increases to your monthly SMS, MMS, or Voice spending quota in the AWS End User Messaging +SMS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2377,8 +2378,8 @@ Creates or updates a keyword configuration on an origination phone number or poo keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message -that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you -specify a keyword that isn't valid, an error is returned. +that begins with a keyword, AWS End User Messaging SMS and Voice responds with a +customizable message. If you specify a keyword that isn't valid, an error is returned. # Arguments - `keyword`: The new keyword to add. 
@@ -2618,7 +2619,7 @@ end request_phone_number(iso_country_code, message_type, number_capabilities, number_type, params::Dict{String,<:Any}) Request an origination phone number for use in your account. For more information on phone -number request see Requesting a number in the Amazon Pinpoint User Guide. +number request see Request a phone number in the AWS End User Messaging SMS User Guide. # Arguments - `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country @@ -2892,11 +2893,12 @@ end send_text_message(destination_phone_number) send_text_message(destination_phone_number, params::Dict{String,<:Any}) -Creates a new text message and sends it to a recipient's phone number. SMS throughput -limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the -destination country of your messages, as well as the type of phone number (origination -number) that you use to send the message. For more information, see Message Parts per -Second (MPS) limits in the Amazon Pinpoint User Guide. +Creates a new text message and sends it to a recipient's phone number. SendTextMessage only +sends an SMS message to one recipient each time it is invoked. SMS throughput limits are +measured in Message Parts per Second (MPS). Your MPS limit depends on the destination +country of your messages, as well as the type of phone number (origination number) that you +use to send the message. For more information about MPS, see Message Parts per Second (MPS) +limits in the AWS End User Messaging SMS User Guide. # Arguments - `destination_phone_number`: The destination phone number in E.164 format. @@ -2910,20 +2912,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DestinationCountryParameters"`: This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. 
For more information see Special requirements for - sending SMS messages to recipients in India. + sending SMS messages to recipients in India. IN_ENTITY_ID The entity ID or Principal + Entity (PE) ID that you received after completing the sender ID registration process. + IN_TEMPLATE_ID The template ID that you received after completing the sender ID + registration process. Make sure that the Template ID that you specify matches your message + template exactly. If your message doesn't match the template that you provided during the + registration process, the mobile carriers might reject your message. - `"DryRun"`: When set to true, the message is checked and validated, but isn't sent to the - end recipient. + end recipient. You are not charged for using DryRun. The Message Parts per Second (MPS) + limit when using DryRun is five. If your origination identity has a lower MPS limit then + the lower MPS limit is used. For more information about MPS limits, see Message Parts per + Second (MPS) limits in the AWS End User Messaging SMS User Guide.. - `"Keyword"`: When you register a short code in the US, you must specify a program name. If you don’t have a US short code, omit this attribute. - `"MaxPrice"`: The maximum amount that you want to spend, in US dollars, per each text - message part. A text message can contain multiple parts. + message. If the calculated amount to send the text message is greater than MaxPrice, the + message is not sent and an error is returned. - `"MessageBody"`: The body of the text message. - `"MessageType"`: The type of message. Valid values are for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. - `"OriginationIdentity"`: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. - `"ProtectConfigurationId"`: The unique identifier for the protect configuration. 
-- `"TimeToLive"`: How long the text message is valid for. By default this is 72 hours. +- `"TimeToLive"`: How long the text message is valid for, in seconds. By default this is 72 + hours. If the messages isn't handed off before the TTL expires we stop attempting to hand + off the message and return TTL_EXPIRED event. """ function send_text_message( DestinationPhoneNumber; aws_config::AbstractAWSConfig=global_aws_config() @@ -2958,8 +2971,8 @@ end send_voice_message(destination_phone_number, origination_identity) send_voice_message(destination_phone_number, origination_identity, params::Dict{String,<:Any}) -Allows you to send a request that sends a voice message through Amazon Pinpoint. This -operation uses Amazon Polly to convert a text script into a voice message. +Allows you to send a request that sends a voice message. This operation uses Amazon Polly +to convert a text script into a voice message. # Arguments - `destination_phone_number`: The destination phone number in E.164 format. @@ -3320,11 +3333,11 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS Voice, -version 2 resource. When you specify an existing tag key, the value is overwritten with the -new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an -optional value. Tag keys must be unique per resource. For more information about tags, see -Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide. +Adds or overwrites only the specified tags for the specified resource. When you specify an +existing tag key, the value is overwritten with the new value. Each resource can have a +maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be +unique per resource. For more information about tags, see Tags in the AWS End User +Messaging SMS User Guide. 
# Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. @@ -3363,9 +3376,8 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2 -resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon -Pinpoint Developer Guide. +Removes the association of the specified tags from a resource. For more information on tags +see Tags in the AWS End User Messaging SMS User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. @@ -3407,10 +3419,10 @@ end update_event_destination(configuration_set_name, event_destination_name, params::Dict{String,<:Any}) Updates an existing event destination in a configuration set. You can update the IAM role -ARN for CloudWatch Logs and Kinesis Data Firehose. You can also enable or disable the event -destination. You may want to update an event destination to change its matching event types -or updating the destination resource ARN. You can't change an event destination's type -between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS. +ARN for CloudWatch Logs and Firehose. You can also enable or disable the event destination. +You may want to update an event destination to change its matching event types or updating +the destination resource ARN. You can't change an event destination's type between +CloudWatch Logs, Firehose, and Amazon SNS. # Arguments - `configuration_set_name`: The configuration set to update with the new event destination. @@ -3423,7 +3435,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys destination that sends data to CloudWatch Logs. - `"Enabled"`: When set to true logging is enabled. - `"KinesisFirehoseDestination"`: An object that contains information about an event - destination for logging to Kinesis Data Firehose. + destination for logging to Firehose. 
- `"MatchingEventTypes"`: An array of event types that determine which events to log. The TEXT_SENT event type is not supported. - `"SnsDestination"`: An object that contains information about an event destination that @@ -3487,10 +3499,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptOutListName"`: The OptOutList to add the phone number to. Valid values for this field can be either the OutOutListName or OutOutListArn. - `"SelfManagedOptOutsEnabled"`: By default this is set to false. When an end recipient - sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon - Pinpoint automatically replies with a customizable message and adds the end recipient to - the OptOutList. When set to true you're responsible for responding to HELP and STOP - requests. You're also responsible for tracking and honoring opt-out requests. + sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End + User Messaging SMS and Voice automatically replies with a customizable message and adds the + end recipient to the OptOutList. When set to true you're responsible for responding to HELP + and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. - `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to post inbound SMS messages. @@ -3540,10 +3552,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptOutListName"`: The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. - `"SelfManagedOptOutsEnabled"`: By default this is set to false. When an end recipient - sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon - Pinpoint automatically replies with a customizable message and adds the end recipient to - the OptOutList. 
When set to true you're responsible for responding to HELP and STOP - requests. You're also responsible for tracking and honoring opt-out requests. + sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End + User Messaging SMS and Voice automatically replies with a customizable message and adds the + end recipient to the OptOutList. When set to true you're responsible for responding to HELP + and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"SharedRoutesEnabled"`: Indicates whether shared routes are enabled for the pool. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. - `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to @@ -3625,7 +3637,7 @@ only applied to the specified NumberCapability type. - `country_rule_set_updates`: A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported - countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide. + countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide. - `number_capability`: The number capability to apply the CountryRuleSetUpdates updates to. - `protect_configuration_id`: The unique identifier for the protect configuration. diff --git a/src/services/qapps.jl b/src/services/qapps.jl new file mode 100644 index 0000000000..978be8f087 --- /dev/null +++ b/src/services/qapps.jl @@ -0,0 +1,1189 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: qapps +using AWS.Compat +using AWS.UUIDs + +""" + associate_library_item_review(instance-id, library_item_id) + associate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Associates a rating or review for a library item with the user submitting the request. 
This +increments the rating count for the specified library item. + +# Arguments +- `instance-id`: The unique identifier for the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to associate the review with. + +""" +function associate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_qapp_with_user(app_id, instance-id) + associate_qapp_with_user(app_id, instance-id, params::Dict{String,<:Any}) + +This operation creates a link between the user's identity calling the operation and a +specific Q App. This is useful to mark the Q App as a favorite for the user if the user +doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q +Apps. + +# Arguments +- `app_id`: The ID of the Amazon Q App to associate with the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function associate_qapp_with_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_qapp_with_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_library_item(app_id, app_version, categories, instance-id) + create_library_item(app_id, app_version, categories, instance-id, params::Dict{String,<:Any}) + +Creates a new library item for an Amazon Q App, allowing it to be discovered and used by +other allowed users. + +# Arguments +- `app_id`: The unique identifier of the Amazon Q App to publish to the library. +- `app_version`: The version of the Amazon Q App to publish to the library. +- `categories`: The categories to associate with the library item for easier discovery. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function create_library_item( + appId, + appVersion, + categories, + instance_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_library_item( + appId, + appVersion, + categories, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_qapp(app_definition, instance-id, title) + create_qapp(app_definition, instance-id, title, params::Dict{String,<:Any}) + +Creates a new Amazon Q App based on the provided definition. The Q App definition specifies +the cards and flow of the Q App. This operation also calculates the dependencies between +the cards by inspecting the references in the prompts. + +# Arguments +- `app_definition`: The definition of the new Q App, specifying the cards and flow. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `title`: The title of the new Q App. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the new Q App. +- `"tags"`: Optional tags to associate with the new Q App. 
+""" +function create_qapp( + appDefinition, instance_id, title; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_qapp( + appDefinition, + instance_id, + title, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_library_item(instance-id, library_item_id) + delete_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Deletes a library item for an Amazon Q App, removing it from the library so it can no +longer be discovered or used by other users. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to delete. 
+ +""" +function delete_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_qapp(app_id, instance-id) + delete_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Deletes an Amazon Q App owned by the user. If the Q App was previously published to the +library, it is also removed from the library. + +# Arguments +- `app_id`: The unique identifier of the Q App to delete. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function delete_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_library_item_review(instance-id, library_item_id) + disassociate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Removes a rating or review previously submitted by the user for a library item. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to remove the review from. 
+ +""" +function disassociate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_qapp_from_user(app_id, instance-id) + disassociate_qapp_from_user(app_id, instance-id, params::Dict{String,<:Any}) + +Disassociates a Q App from a user removing the user's access to run the Q App. + +# Arguments +- `app_id`: The unique identifier of the Q App to disassociate from the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function disassociate_qapp_from_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_qapp_from_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_library_item(instance-id, library_item_id) + get_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Retrieves details about a library item for an Amazon Q App, including its metadata, +categories, ratings, and usage statistics. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appId"`: The unique identifier of the Amazon Q App associated with the library item. 
+""" +function get_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp(app_id, instance-id) + get_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Retrieves the full details of an Q App, including its definition specifying the cards and +flow. + +# Arguments +- `app_id`: The unique identifier of the Q App to retrieve. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function get_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp_session(instance-id, session_id) + get_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Retrieves the current state and results for an active session of an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to retrieve. 
+ +""" +function get_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope) + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope, params::Dict{String,<:Any}) + +Uploads a file that can then be used either as a default in a FileUploadCard from Q App +definition or as a file that is used inside a single Q App run. The purpose of the document +is determined by a scope parameter that indicates whether it is at the app definition level +or at the app session level. + +# Arguments +- `app_id`: The unique identifier of the Q App the file is associated with. +- `card_id`: The unique identifier of the card the file is associated with, if applicable. +- `file_contents_base64`: The base64-encoded contents of the file to upload. +- `file_name`: The name of the file being uploaded. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `scope`: Whether the file is associated with an Q App definition or a specific Q App + session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"sessionId"`: The unique identifier of the Q App session the file is associated with, if + applicable. +""" +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_library_items(instance-id) + list_library_items(instance-id, params::Dict{String,<:Any}) + +Lists the library items for Amazon Q Apps that are published and available for users in +your Amazon Web Services account. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categoryId"`: Optional category to filter the library items by. +- `"limit"`: The maximum number of library items to return in the response. +- `"nextToken"`: The token to request the next page of results. 
+""" +function list_library_items(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_library_items( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_qapps(instance-id) + list_qapps(instance-id, params::Dict{String,<:Any}) + +Lists the Amazon Q Apps owned by or associated with the user either because they created it +or because they used it from the library in the past. The user identity is extracted from +the credentials used to invoke this operation.. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: The maximum number of Q Apps to return in the response. +- `"nextToken"`: The token to request the next page of results. 
+""" +function list_qapps(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_qapps( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags associated with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource whose tags should be + listed. + +""" +function list_tags_for_resource( + resourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/tags/$(resourceARN)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/tags/$(resourceARN)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + predict_qapp(instance-id) + predict_qapp(instance-id, params::Dict{String,<:Any}) + +Generates an Amazon Q App definition based on either a conversation or a problem statement +provided as input.The resulting app definition can be used to call CreateQApp. This API +doesn't create Amazon Q Apps directly. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"options"`: The input to generate the Q App definition from, either a conversation or + problem statement. +""" +function predict_qapp(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function predict_qapp( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_qapp_session(app_id, app_version, instance-id) + start_qapp_session(app_id, app_version, instance-id, params::Dict{String,<:Any}) + +Starts a new session for an Amazon Q App, allowing inputs to be provided and the app to be +run. Each Q App session will be condensed into a single conversation in the web +experience. + +# Arguments +- `app_id`: The unique identifier of the Q App to start a session for. +- `app_version`: The version of the Q App to use for the session. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"initialValues"`: Optional initial input values to provide for the Q App session. +- `"tags"`: Optional tags to associate with the new Q App session. 
+""" +function start_qapp_session( + appId, appVersion, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_qapp_session( + appId, + appVersion, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_qapp_session(instance-id, session_id) + stop_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Stops an active session for an Amazon Q App.This deletes all data related to the session +and makes it invalid for future uses. The results of the session will be persisted as part +of the conversation. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to stop. 
+ +""" +function stop_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associates tags with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to tag. +- `tags`: The tags to associate with the resource. + +""" +function tag_resource(resourceARN, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceARN, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Disassociates tags from an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to disassociate the tag + from. 
+- `tag_keys`: The keys of the tags to disassociate from the resource. + +""" +function untag_resource( + resourceARN, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceARN, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_library_item(instance-id, library_item_id) + update_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Updates the metadata and status of a library item for an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categories"`: The new categories to associate with the library item. +- `"status"`: The new status to set for the library item, such as \"Published\" or + \"Hidden\". 
+""" +function update_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp(app_id, instance-id) + update_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q App, allowing modifications to its title, description, and +definition. + +# Arguments +- `app_id`: The unique identifier of the Q App to update. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appDefinition"`: The new definition specifying the cards and flow for the Q App. +- `"description"`: The new description for the Q App. +- `"title"`: The new title for the Q App. 
+""" +function update_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp_session(instance-id, session_id) + update_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Updates the session for a given Q App sessionId. This is only valid when at least one card +of the session is in the WAITING state. Data for each WAITING card can be provided as +input. If inputs are not provided, the call will be accepted but session will not move +forward. Inputs for cards that are not in the WAITING status will be ignored. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to provide input for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"values"`: The input values to provide for the current state of the Q App session. 
+""" +function update_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/qbusiness.jl b/src/services/qbusiness.jl index 9fb58ab1af..4ef143385a 100644 --- a/src/services/qbusiness.jl +++ b/src/services/qbusiness.jl @@ -191,6 +191,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. For more information, see Personalizing chat responses - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permissions to access @@ -2140,6 +2142,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. 
For more information, see Personalizing chat responses. - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: An Amazon Web Services Identity and Access Management (IAM) role that gives diff --git a/src/services/qconnect.jl b/src/services/qconnect.jl index 29c813e3b2..1680362d5f 100644 --- a/src/services/qconnect.jl +++ b/src/services/qconnect.jl @@ -195,6 +195,80 @@ function create_content( ) end +""" + create_content_association(association, association_type, content_id, knowledge_base_id) + create_content_association(association, association_type, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Creates an association between a content resource in a knowledge base and step-by-step +guides. Step-by-step guides offer instructions to agents for resolving common customer +issues. You create a content association to integrate Amazon Q in Connect and step-by-step +guides. After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a +recommendation to an agent based on the intent that it's detected, it also provides them +with the option to start the step-by-step guide that you have associated with the content. +Note the following limitations: You can create only one content association for each +content resource in a knowledge base. You can associate a step-by-step guide with +multiple content resources. For more information, see Integrate Amazon Q in Connect with +step-by-step guides in the Amazon Connect Administrator Guide. + +# Arguments +- `association`: The identifier of the associated resource. +- `association_type`: The type of association. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_type, name) create_knowledge_base(knowledge_base_type, name, params::Dict{String,<:Any}) @@ -502,6 +576,50 @@ function delete_content( ) end +""" + delete_content_association(content_association_id, content_id, knowledge_base_id) + delete_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the content association. 
For more information about content associations--what +they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides +in the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +""" +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_import_job(import_job_id, knowledge_base_id) delete_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -725,6 +843,50 @@ function get_content( ) end +""" + get_content_association(content_association_id, content_id, knowledge_base_id) + get_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Returns the content association. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. 
+- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +""" +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_content_summary(content_id, knowledge_base_id) get_content_summary(content_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1026,6 +1188,49 @@ function list_assistants( ) end +""" + list_content_associations(content_id, knowledge_base_id) + list_content_associations(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Lists the content associations. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_content_associations( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_content_associations( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_contents(knowledge_base_id) list_contents(knowledge_base_id, params::Dict{String,<:Any}) diff --git a/src/services/quicksight.jl b/src/services/quicksight.jl index 6b911be5f0..bfc5e371a1 100644 --- a/src/services/quicksight.jl +++ b/src/services/quicksight.jl @@ -4,6 +4,88 @@ using AWS.AWSServices: quicksight using AWS.Compat using AWS.UUIDs +""" + batch_create_topic_reviewed_answer(answers, aws_account_id, topic_id) + batch_create_topic_reviewed_answer(answers, aws_account_id, topic_id, params::Dict{String,<:Any}) + +Creates new reviewed answers for a Q Topic. + +# Arguments +- `answers`: The definition of the Answers to be created. +- `aws_account_id`: The ID of the Amazon Web Services account that you want to create a + reviewed answer in. +- `topic_id`: The ID for the topic reviewed answer that you want to create. This ID is + unique per Amazon Web Services Region for each Amazon Web Services account. 
+ +""" +function batch_create_topic_reviewed_answer( + Answers, AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-create-reviewed-answers", + Dict{String,Any}("Answers" => Answers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_create_topic_reviewed_answer( + Answers, + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-create-reviewed-answers", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Answers" => Answers), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_delete_topic_reviewed_answer(aws_account_id, topic_id) + batch_delete_topic_reviewed_answer(aws_account_id, topic_id, params::Dict{String,<:Any}) + +Deletes reviewed answers for Q Topic. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that you want to delete a + reviewed answers in. +- `topic_id`: The ID for the topic reviewed answer that you want to delete. This ID is + unique per Amazon Web Services Region for each Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AnswerIds"`: The Answer IDs of the Answers to be deleted. 
+""" +function batch_delete_topic_reviewed_answer( + AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-delete-reviewed-answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_topic_reviewed_answer( + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-delete-reviewed-answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_ingestion(aws_account_id, data_set_id, ingestion_id) cancel_ingestion(aws_account_id, data_set_id, ingestion_id, params::Dict{String,<:Any}) @@ -5643,6 +5725,44 @@ function list_topic_refresh_schedules( ) end +""" + list_topic_reviewed_answers(aws_account_id, topic_id) + list_topic_reviewed_answers(aws_account_id, topic_id, params::Dict{String,<:Any}) + +Lists all reviewed answers for a Q Topic. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that containd the reviewed + answers that you want listed. +- `topic_id`: The ID for the topic that contains the reviewed answer that you want to list. + This ID is unique per Amazon Web Services Region for each Amazon Web Services account. 
+ +""" +function list_topic_reviewed_answers( + AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/reviewed-answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_topic_reviewed_answers( + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/reviewed-answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_topics(aws_account_id) list_topics(aws_account_id, params::Dict{String,<:Any}) diff --git a/src/services/rds.jl b/src/services/rds.jl index aaddc12b25..0593bda179 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -1146,7 +1146,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB cluster. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the name of an existing DB subnet group. - Must not be default. Example: mydbsubnetgroup + Example: mydbsubnetgroup - `"DBSystemId"`: Reserved for future use. - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional @@ -1365,22 +1365,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. - `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. When the - DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the - private IP address from within the DB cluster's virtual private cloud (VPC). 
It resolves to - the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is - ultimately controlled by the security group it uses. That public access isn't permitted if - the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't - publicly accessible, it is an internal DB cluster with a DNS name that resolves to a - private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default - behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName - isn't specified, and PubliclyAccessible isn't specified, the following applies: If the - default VPC in the target Region doesn’t have an internet gateway attached to it, the DB - cluster is private. If the default VPC in the target Region has an internet gateway - attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and - PubliclyAccessible isn't specified, the following applies: If the subnets are part of a - VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If - the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster - is public. + DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual + private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP + address. When you connect from within the same VPC as the DB cluster, the endpoint resolves + to the private IP address. Access to the DB cluster is ultimately controlled by the + security group it uses. That public access isn't permitted if the security group assigned + to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is + an internal DB cluster with a DNS name that resolves to a private IP address. Valid for + Cluster Type: Multi-AZ DB clusters only Default: The default behavior varies depending on + whether DBSubnetGroupName is specified. 
If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the + target Region doesn’t have an internet gateway attached to it, the DB cluster is private. + If the default VPC in the target Region has an internet gateway attached to it, the DB + cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't + specified, the following applies: If the subnets are part of a VPC that doesn’t have an + internet gateway attached to it, the DB cluster is private. If the subnets are part of a + VPC that has an internet gateway attached to it, the DB cluster is public. - `"RdsCustomClusterConfiguration"`: Reserved for future use. - `"ReplicationSourceIdentifier"`: The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica. Valid for Cluster Type: @@ -2049,21 +2049,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 - `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When - the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to - the private IP address from within the DB instance's virtual private cloud (VPC). It - resolves to the public IP address from outside of the DB instance's VPC. Access to the DB - instance is ultimately controlled by the security group it uses. That public access is not - permitted if the security group assigned to the DB instance doesn't permit it. When the DB - instance isn't publicly accessible, it is an internal DB instance with a DNS name that - resolves to a private IP address. Default: The default behavior varies depending on whether - DBSubnetGroupName is specified. 
If DBSubnetGroupName isn't specified, and - PubliclyAccessible isn't specified, the following applies: If the default VPC in the - target Region doesn’t have an internet gateway attached to it, the DB instance is - private. If the default VPC in the target Region has an internet gateway attached to it, - the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible - isn't specified, the following applies: If the subnets are part of a VPC that doesn’t - have an internet gateway attached to it, the DB instance is private. If the subnets are - part of a VPC that has an internet gateway attached to it, the DB instance is public. + the DB instance is publicly accessible and you connect from outside of the DB instance's + virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public + IP address. When you connect from within the same VPC as the DB instance, the endpoint + resolves to the private IP address. Access to the DB instance is ultimately controlled by + the security group it uses. That public access is not permitted if the security group + assigned to the DB instance doesn't permit it. When the DB instance isn't publicly + accessible, it is an internal DB instance with a DNS name that resolves to a private IP + address. Default: The default behavior varies depending on whether DBSubnetGroupName is + specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, + the following applies: If the default VPC in the target Region doesn’t have an internet + gateway attached to it, the DB instance is private. If the default VPC in the target + Region has an internet gateway attached to it, the DB instance is public. If + DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following + applies: If the subnets are part of a VPC that doesn’t have an internet gateway + attached to it, the DB instance is private. 
If the subnets are part of a VPC that has an + internet gateway attached to it, the DB instance is public. - `"StorageEncrypted"`: Specifes whether the DB instance is encrypted. By default, it isn't encrypted. For RDS Custom DB instances, either enable this setting or leave it unset. Otherwise, Amazon RDS reports an error. This setting doesn't apply to Amazon Aurora DB @@ -2757,6 +2758,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. +- `"MinACU"`: The minimum capacity of the DB shard group in Aurora capacity units (ACUs). - `"PubliclyAccessible"`: Specifies whether the DB shard group is publicly accessible. When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It @@ -3432,16 +3434,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. - `"FinalDBSnapshotIdentifier"`: The DB cluster snapshot identifier of the new DB cluster - snapshot created when SkipFinalSnapshot is disabled. Specifying this parameter and also - skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter - results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First - character must be a letter Can't end with a hyphen or contain two consecutive hyphens + snapshot created when SkipFinalSnapshot is disabled. If you specify this parameter and + also skip the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter, + the request results in an error. 
Constraints: Must be 1 to 255 letters, numbers, or + hyphens. First character must be a letter Can't end with a hyphen or contain two + consecutive hyphens - `"SkipFinalSnapshot"`: Specifies whether to skip the creation of a final DB cluster - snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is - created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is - deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By - default, this parameter is disabled. You must specify a FinalDBSnapshotIdentifier - parameter if SkipFinalSnapshot is disabled. + snapshot before RDS deletes the DB cluster. If you set this value to true, RDS doesn't + create a final DB cluster snapshot. If you set this value to false or don't specify it, RDS + creates a DB cluster snapshot before it deletes the DB cluster. By default, this parameter + is disabled, so RDS creates a final DB cluster snapshot. If SkipFinalSnapshot is disabled, + you must specify a value for the FinalDBSnapshotIdentifier parameter. """ function delete_dbcluster( DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -6204,7 +6207,11 @@ end describe_pending_maintenance_actions(params::Dict{String,<:Any}) Returns a list of resources (for example, DB instances) that have at least one pending -maintenance action. +maintenance action. This API follows an eventual consistency model. This means that the +result of the DescribePendingMaintenanceActions command might not be immediately visible to +all subsequent RDS commands. Keep this in mind when you use +DescribePendingMaintenanceActions immediately after using a previous API command such as +ApplyPendingMaintenanceActions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7872,16 +7879,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 - `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When - the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to - the private IP address from within the DB cluster's virtual private cloud (VPC). It - resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB - cluster is ultimately controlled by the security group it uses. That public access isn't - permitted if the security group assigned to the DB cluster doesn't permit it. When the DB - instance isn't publicly accessible, it is an internal DB instance with a DNS name that - resolves to a private IP address. PubliclyAccessible only applies to DB instances in a - VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled - for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied - immediately regardless of the value of the ApplyImmediately parameter. + the DB instance is publicly accessible and you connect from outside of the DB instance's + virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public + IP address. When you connect from within the same VPC as the DB instance, the endpoint + resolves to the private IP address. Access to the DB instance is ultimately controlled by + the security group it uses. That public access isn't permitted if the security group + assigned to the DB instance doesn't permit it. When the DB instance isn't publicly + accessible, it is an internal DB instance with a DNS name that resolves to a private IP + address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be + part of a public subnet and PubliclyAccessible must be enabled for it to be publicly + accessible. 
Changes to the PubliclyAccessible parameter are applied immediately regardless + of the value of the ApplyImmediately parameter. - `"ReplicaMode"`: A value that sets the open mode of a replica database to either mounted or read-only. Currently, this parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for @@ -8252,6 +8260,7 @@ more settings by specifying these parameters and the new values in the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxACU"`: The maximum capacity of the DB shard group in Aurora capacity units (ACUs). +- `"MinACU"`: The minimum capacity of the DB shard group in Aurora capacity units (ACUs). """ function modify_dbshard_group( DBShardGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -11051,11 +11060,10 @@ end Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must have access to the S3 bucket. You can't export snapshot data from Db2 or RDS Custom DB -instances. You can't export cluster data from Multi-AZ DB clusters. For more information on -exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS -User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User -Guide. For more information on exporting DB cluster data, see Exporting DB cluster data to -Amazon S3 in the Amazon Aurora User Guide. +instances. For more information on exporting DB snapshot data, see Exporting DB snapshot +data to Amazon S3 in the Amazon RDS User Guide or Exporting DB cluster snapshot data to +Amazon S3 in the Amazon Aurora User Guide. For more information on exporting DB cluster +data, see Exporting DB cluster data to Amazon S3 in the Amazon Aurora User Guide. # Arguments - `export_task_identifier`: A unique identifier for the export task. 
This ID isn't an diff --git a/src/services/redshift_serverless.jl b/src/services/redshift_serverless.jl index a322e899d4..0fb80814d1 100644 --- a/src/services/redshift_serverless.jl +++ b/src/services/redshift_serverless.jl @@ -245,8 +245,8 @@ operation. action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, - see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster - Management Guide + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management + Guide - `schedule`: The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. @@ -501,6 +501,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet. +- `"ipAddressType"`: The IP address type that the workgroup supports. Possible values are + ipv4 and dualstack. - `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are @@ -2201,8 +2203,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. 
For more information about the IAM role to use with the Amazon Redshift scheduler, - see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster - Management Guide + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management + Guide - `"schedule"`: The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, @@ -2389,6 +2391,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC. +- `"ipAddressType"`: The IP address type that the workgroup supports. Possible values are + ipv4 and dualstack. - `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are diff --git a/src/services/rekognition.jl b/src/services/rekognition.jl index ea1220c2fc..cc561675d6 100644 --- a/src/services/rekognition.jl +++ b/src/services/rekognition.jl @@ -349,6 +349,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys existing dataset or specify the Amazon S3 bucket location of an Amazon Sagemaker format manifest file. If you don't specify datasetSource, an empty dataset is created. To add labeled images to the dataset, You can use the console or call UpdateDatasetEntries. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the dataset. """ function create_dataset( DatasetType, ProjectArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -441,6 +442,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys for Content Moderation. Applicable only to adapters. - `"Feature"`: Specifies feature that is being customized. If no value is provided CUSTOM_LABELS is used as a default. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the project. """ function create_project(ProjectName; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -2148,7 +2150,9 @@ in the sample seen below. Use MaxResults parameter to limit the number of label If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request -parameter with the token value returned from the previous call to GetLabelDetection. +parameter with the token value returned from the previous call to GetLabelDetection. If you +are retrieving results while using the Amazon Simple Notification Service, note that you +will receive an \"ERROR\" notification if the job encounters an issue. # Arguments - `job_id`: Job identifier for the label detection operation for which you want results diff --git a/src/services/resiliencehub.jl b/src/services/resiliencehub.jl index 55f64d15ed..1dd955516c 100644 --- a/src/services/resiliencehub.jl +++ b/src/services/resiliencehub.jl @@ -4,6 +4,52 @@ using AWS.AWSServices: resiliencehub using AWS.Compat using AWS.UUIDs +""" + accept_resource_grouping_recommendations(app_arn, entries) + accept_resource_grouping_recommendations(app_arn, entries, params::Dict{String,<:Any}) + +Accepts the resource grouping recommendations suggested by Resilience Hub for your +application. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. 
For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `entries`: Indicates the list of resource grouping recommendations you want to include in + your application. + +""" +function accept_resource_grouping_recommendations( + appArn, entries; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/accept-resource-grouping-recommendations", + Dict{String,Any}("appArn" => appArn, "entries" => entries); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function accept_resource_grouping_recommendations( + appArn, + entries, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/accept-resource-grouping-recommendations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("appArn" => appArn, "entries" => entries), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ add_draft_app_version_resource_mappings(app_arn, resource_mappings) add_draft_app_version_resource_mappings(app_arn, resource_mappings, params::Dict{String,<:Any}) @@ -11,7 +57,7 @@ using AWS.UUIDs Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of -resources suported by Resilience Hub and how to add them in your application, see Step 2: +resources supported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide. # Arguments @@ -1017,7 +1063,7 @@ end describe_app_version_resource(app_arn, app_version, params::Dict{String,<:Any}) Describes a resource of the Resilience Hub application. 
This API accepts only one of the -following parameters to descibe the resource: resourceName logicalResourceId +following parameters to describe the resource: resourceName logicalResourceId physicalResourceId (Along with physicalResourceId, you can also provide awsAccountId, and awsRegion) @@ -1245,6 +1291,46 @@ function describe_resiliency_policy( ) end +""" + describe_resource_grouping_recommendation_task(app_arn) + describe_resource_grouping_recommendation_task(app_arn, params::Dict{String,<:Any}) + +Describes the resource grouping recommendation tasks run by Resilience Hub for your +application. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"groupingId"`: Indicates the identifier of the grouping recommendation task. 
+""" +function describe_resource_grouping_recommendation_task( + appArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/describe-resource-grouping-recommendation-task", + Dict{String,Any}("appArn" => appArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_resource_grouping_recommendation_task( + appArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/describe-resource-grouping-recommendation-task", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("appArn" => appArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ import_resources_to_draft_app_version(app_arn) import_resources_to_draft_app_version(app_arn, params::Dict{String,<:Any}) @@ -1351,9 +1437,8 @@ List of compliance drifts that were detected while running an assessment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Indicates the maximum number of applications requested. -- `"nextToken"`: Indicates the unique token number of the next application to be checked - for compliance and regulatory requirements from the list of applications. +- `"maxResults"`: Indicates the maximum number of compliance drifts requested. +- `"nextToken"`: Null, or the token from a previous call to get the next set of results. """ function list_app_assessment_compliance_drifts( assessmentArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1842,7 +1927,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: Null, or the token from a previous call to get the next set of results. - `"reverseOrder"`: The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending - order. 
To sort the appliation list in descending order, set this field to True. + order. To sort the application list in descending order, set this field to True. - `"toLastAssessmentTime"`: Indicates the upper limit of the range that is used to filter the applications based on their last assessment times. """ @@ -1936,6 +2021,45 @@ function list_resiliency_policies( ) end +""" + list_resource_grouping_recommendations() + list_resource_grouping_recommendations(params::Dict{String,<:Any}) + +Lists the resource grouping recommendations suggested by Resilience Hub for your +application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appArn"`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `"maxResults"`: Maximum number of grouping recommendations to be displayed per Resilience + Hub application. +- `"nextToken"`: Null, or the token from a previous call to get the next set of results. 
+""" +function list_resource_grouping_recommendations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "GET", + "/list-resource-grouping-recommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_resource_grouping_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "GET", + "/list-resource-grouping-recommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_sop_recommendations(assessment_arn) list_sop_recommendations(assessment_arn, params::Dict{String,<:Any}) @@ -2288,6 +2412,51 @@ function put_draft_app_version_template( ) end +""" + reject_resource_grouping_recommendations(app_arn, entries) + reject_resource_grouping_recommendations(app_arn, entries, params::Dict{String,<:Any}) + +Rejects resource grouping recommendations. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `entries`: Indicates the list of resource grouping recommendations you have selected to + exclude from your application. 
+ +""" +function reject_resource_grouping_recommendations( + appArn, entries; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/reject-resource-grouping-recommendations", + Dict{String,Any}("appArn" => appArn, "entries" => entries); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reject_resource_grouping_recommendations( + appArn, + entries, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/reject-resource-grouping-recommendations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("appArn" => appArn, "entries" => entries), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ remove_draft_app_version_resource_mappings(app_arn) remove_draft_app_version_resource_mappings(app_arn, params::Dict{String,<:Any}) @@ -2450,6 +2619,42 @@ function start_app_assessment( ) end +""" + start_resource_grouping_recommendation_task(app_arn) + start_resource_grouping_recommendation_task(app_arn, params::Dict{String,<:Any}) + +Starts grouping recommendation task. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. 
+ +""" +function start_resource_grouping_recommendation_task( + appArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/start-resource-grouping-recommendation-task", + Dict{String,Any}("appArn" => appArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_resource_grouping_recommendation_task( + appArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/start-resource-grouping-recommendation-task", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("appArn" => appArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) diff --git a/src/services/rolesanywhere.jl b/src/services/rolesanywhere.jl index 25794f50c5..62602676fa 100644 --- a/src/services/rolesanywhere.jl +++ b/src/services/rolesanywhere.jl @@ -19,6 +19,8 @@ You use profiles to intersect permissions with IAM managed policies. Required p # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"acceptRoleSessionName"`: Used to determine if a custom role session name will be + accepted in a temporary credential request. - `"durationSeconds"`: Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600. @@ -1071,6 +1073,8 @@ permissions: rolesanywhere:UpdateProfile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"acceptRoleSessionName"`: Used to determine if a custom role session name will be + accepted in a temporary credential request. - `"durationSeconds"`: Used to determine how long sessions vended using this profile are valid for. 
See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600. diff --git a/src/services/s3.jl b/src/services/s3.jl index d9db5a2659..41a5ef3d1d 100644 --- a/src/services/s3.jl +++ b/src/services/s3.jl @@ -301,30 +301,29 @@ Amazon Web Services Identity and Access Management (IAM) identity-based policies Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to -read the entire response body to check if the copy succeeds. to keep the connection alive -while we copy the data. If the copy is successful, you receive a response with -information about the copied object. A copy request might return an error when Amazon S3 -receives the copy request or while Amazon S3 is copying the files. A 200 OK response can -contain either a success or an error. If the error occurs before the copy action starts, -you receive a standard Amazon S3 error. If the error occurs during the copy operation, -the error response is embedded in the 200 OK response. For example, in a cross-region copy, -you may encounter throttling and receive a 200 OK response. For more information, see -Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code -means the copy was accepted, but it doesn't mean the copy is complete. Another example is -when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the -copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the -entire response is successfully received and processed. If you call this API operation -directly, make sure to design your application to parse the content of the response and -handle it appropriately. 
If you use Amazon Web Services SDKs, SDKs handle this condition. -The SDKs detect the embedded error and apply error handling per your configuration settings -(including automatically retrying the request as appropriate). If the condition persists, -the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an -error). Charge The copy request charge is based on the storage class and Region that -you specify for the destination object. The request can also result in a data retrieval -charge for the source if the source storage class bills for data retrieval. If the copy -source is in a different region, the data transfer is billed to the copy source account. -For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory -buckets - The HTTP Host header syntax is +read the entire response body to check if the copy succeeds. If the copy is successful, +you receive a response with information about the copied object. A copy request might +return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the +files. A 200 OK response can contain either a success or an error. If the error occurs +before the copy action starts, you receive a standard Amazon S3 error. If the error +occurs during the copy operation, the error response is embedded in the 200 OK response. +For example, in a cross-region copy, you may encounter throttling and receive a 200 OK +response. For more information, see Resolve the Error 200 response when copying objects to +Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy +is complete. Another example is when you disconnect from Amazon S3 before the copy is +complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must +stay connected to Amazon S3 until the entire response is successfully received and +processed. 
If you call this API operation directly, make sure to design your application to +parse the content of the response and handle it appropriately. If you use Amazon Web +Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +error handling per your configuration settings (including automatically retrying the +request as appropriate). If the condition persists, the SDKs throw an exception (or, for +the SDKs that don't use exceptions, they return an error). Charge The copy request +charge is based on the storage class and Region that you specify for the destination +object. The request can also result in a data retrieval charge for the source if the source +storage class bills for data retrieval. If the copy source is in a different region, the +data transfer is billed to the copy source account. For pricing information, see Amazon S3 +pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject @@ -2012,7 +2011,7 @@ Permissions General purpose bucket permissions - The following permissions a in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of -an object from a versiong-enabled bucket, you must specify the s3:DeleteObjectVersion +an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission @@ -4150,6 +4149,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"partNumber"`: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object. +- `"response-cache-control"`: Sets the Cache-Control header of the response. +- `"response-content-disposition"`: Sets the Content-Disposition header of the response. +- `"response-content-encoding"`: Sets the Content-Encoding header of the response. +- `"response-content-language"`: Sets the Content-Language header of the response. +- `"response-content-type"`: Sets the Content-Type header of the response. +- `"response-expires"`: Sets the Expires header of the response. - `"versionId"`: Version ID used to reference a specific version of the object. For directory buckets in this API operation, only the null value of the version ID is supported. @@ -7804,12 +7809,12 @@ bucket, you must have the s3:GetObject permission to read the source object th copied. If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. For information about permissions required to use the multipart upload API, see Multipart -Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - You -must have permissions in a bucket policy or an IAM identity-based policy based on the +upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - +You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession -permission in the Action element of a policy to read the object . By default, the session -is in the ReadWrite mode. 
If you want to restrict the access, you can explicitly set the +permission in the Action element of a policy to read the object. By default, the session is +in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index bf951fe784..91bd67d209 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -3532,6 +3532,101 @@ function create_notebook_instance_lifecycle_config( ) end +""" + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition) + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition, params::Dict{String,<:Any}) + +Creates a job that optimizes a model for inference performance. To create the job, you +provide the location of a source model, and you provide the settings for the optimization +techniques that you want the job to apply. When the job completes successfully, SageMaker +uploads the new optimized model to the output destination that you specify. For more +information about how to use this action, and about the supported optimization techniques, +see Optimize model inference with Amazon SageMaker. + +# Arguments +- `deployment_instance_type`: The type of instance that hosts the optimized model that you + create with the optimization job. +- `model_source`: The location of the source model to optimize with an optimization job. +- `optimization_configs`: Settings for each of the optimization techniques that the job + applies. +- `optimization_job_name`: A custom name for the new optimization job. 
+- `output_config`: Details for where to store the optimized model that you create with the + optimization job. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your + permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket + Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant + permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, + the caller of this API must have the iam:PassRole permission. For more information, see + Amazon SageMaker Roles. +- `stopping_condition`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"OptimizationEnvironment"`: The environment variables to set in the model container. +- `"Tags"`: A list of key-value pairs associated with the optimization job. For more + information, see Tagging Amazon Web Services resources in the Amazon Web Services General + Reference Guide. +- `"VpcConfig"`: A VPC in Amazon VPC that your optimized model has access to. 
+""" +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_pipeline(client_request_token, pipeline_name, role_arn) create_pipeline(client_request_token, pipeline_name, role_arn, params::Dict{String,<:Any}) @@ -5503,7 +5598,8 @@ Delete a hub content reference in order to remove a model from a private hub. # Arguments - `hub_content_name`: The name of the hub content to delete. -- `hub_content_type`: The type of hub content to delete. +- `hub_content_type`: The type of hub content reference to delete. The only supported type + of hub content reference to delete is ModelReference. 
- `hub_name`: The name of the hub to delete the hub content reference from. """ @@ -6240,6 +6336,45 @@ function delete_notebook_instance_lifecycle_config( ) end +""" + delete_optimization_job(optimization_job_name) + delete_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Deletes an optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. + +""" +function delete_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_pipeline(client_request_token, pipeline_name) delete_pipeline(client_request_token, pipeline_name, params::Dict{String,<:Any}) @@ -8493,6 +8628,45 @@ function describe_notebook_instance_lifecycle_config( ) end +""" + describe_optimization_job(optimization_job_name) + describe_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Provides the properties of the specified optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. 
+ +""" +function describe_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_pipeline(pipeline_name) describe_pipeline(pipeline_name, params::Dict{String,<:Any}) @@ -11782,6 +11956,53 @@ function list_notebook_instances( ) end +""" + list_optimization_jobs() + list_optimization_jobs(params::Dict{String,<:Any}) + +Lists the optimization jobs in your account and their properties. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Filters the results to only those optimization jobs that were + created after the specified time. +- `"CreationTimeBefore"`: Filters the results to only those optimization jobs that were + created before the specified time. +- `"LastModifiedTimeAfter"`: Filters the results to only those optimization jobs that were + updated after the specified time. +- `"LastModifiedTimeBefore"`: Filters the results to only those optimization jobs that were + updated before the specified time. +- `"MaxResults"`: The maximum number of optimization jobs to return in the response. The + default is 50. +- `"NameContains"`: Filters the results to only those optimization jobs with a name that + contains the specified string. +- `"NextToken"`: A token that you use to get the next set of results following a truncated + response. 
If the response to the previous request was truncated, that response provides the + value for this token. +- `"OptimizationContains"`: Filters the results to only those optimization jobs that apply + the specified optimization techniques. You can specify either Quantization or Compilation. +- `"SortBy"`: The field by which to sort the optimization jobs in the response. The default + is CreationTime +- `"SortOrder"`: The sort order for results. The default is Ascending +- `"StatusEquals"`: Filters the results to only those optimization jobs with the specified + status. +""" +function list_optimization_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListOptimizationJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_optimization_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListOptimizationJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_pipeline_execution_steps() list_pipeline_execution_steps(params::Dict{String,<:Any}) @@ -13663,6 +13884,45 @@ function stop_notebook_instance( ) end +""" + stop_optimization_job(optimization_job_name) + stop_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Ends a running inference optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. 
+ +""" +function stop_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_pipeline_execution(client_request_token, pipeline_execution_arn) stop_pipeline_execution(client_request_token, pipeline_execution_arn, params::Dict{String,<:Any}) diff --git a/src/services/secrets_manager.jl b/src/services/secrets_manager.jl index 90c4910c41..3cf851578a 100644 --- a/src/services/secrets_manager.jl +++ b/src/services/secrets_manager.jl @@ -133,7 +133,10 @@ secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and -kms:Decrypt permission to the key. +kms:Decrypt permission to the key. When you enter commands in a command shell, there is a +risk of the command history being accessed or utilities having access to your command +parameters. This is a concern if the command includes the value of a secret. Learn how to +Mitigate the risks of using command-line tools to store Secrets Manager secrets. # Arguments - `name`: The name of the new secret. The secret name can contain ASCII letters, numbers, @@ -725,7 +728,11 @@ log entry when you call this action. 
Do not include sensitive information in req parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions -for Secrets Manager and Authentication and access control in Secrets Manager. +for Secrets Manager and Authentication and access control in Secrets Manager. When you +enter commands in a command shell, there is a risk of the command history being accessed or +utilities having access to your command parameters. This is a concern if the command +includes the value of a secret. Learn how to Mitigate the risks of using command-line tools +to store Secrets Manager secrets. # Arguments - `secret_id`: The ARN or name of the secret to add a new version to. For an ARN, we @@ -1215,8 +1222,12 @@ secretsmanager:UpdateSecret. For more information, see IAM policy actions for S Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission -to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new -key. For more information, see Secret encryption and decryption. +to the new key, Secrets Manager does not re-encrypt existing secret versions with the new +key. For more information, see Secret encryption and decryption. When you enter commands +in a command shell, there is a risk of the command history being accessed or utilities +having access to your command parameters. This is a concern if the command includes the +value of a secret. Learn how to Mitigate the risks of using command-line tools to store +Secrets Manager secrets. # Arguments - `secret_id`: The ARN or name of the secret. 
For an ARN, we recommend that you specify a @@ -1239,13 +1250,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"KmsKeyId"`: The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new - key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more - information about versions and staging labels, see Concepts: Version. A key alias is always - prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About - aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services - managed key aws/secretsmanager. If this key doesn't already exist in your account, then - Secrets Manager creates it for you automatically. All users and roles in the Amazon Web - Services account automatically have access to use aws/secretsmanager. Creating + key, Secrets Manager does not re-encrypt existing secret versions with the new key. For + more information about versions and staging labels, see Concepts: Version. A key alias is + always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see + About aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web + Services managed key aws/secretsmanager. If this key doesn't already exist in your account, + then Secrets Manager creates it for you automatically. All users and roles in the Amazon + Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. 
diff --git a/src/services/sfn.jl b/src/services/sfn.jl index dbd8f7bdbf..5986aab180 100644 --- a/src/services/sfn.jl +++ b/src/services/sfn.jl @@ -30,6 +30,7 @@ case, tags will not be updated, even if they are different. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"encryptionConfiguration"`: Settings to configure server-side encryption. - `"tags"`: The list of tags to add to a resource. An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide, and Controlling Access Using IAM Tags. Tags may only contain Unicode @@ -63,15 +64,19 @@ work (Task states), determine to which states to transition next (Choice states) execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide. If you set the publish parameter of this API action to true, it -publishes version 1 as the first revision of the state machine. This operation is -eventually consistent. The results are best effort and may not reflect very recent updates -and changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create -a duplicate resource if it was already created. CreateStateMachine's idempotency check is -based on the state machine name, definition, type, LoggingConfiguration, and -TracingConfiguration. The check is also based on the publish and versionDescription -parameters. If a following request has a different roleArn or tags, Step Functions will -ignore these differences and treat it as an idempotent request of the previous. In this -case, roleArn and tags will not be updated, even if they are different. +publishes version 1 as the first revision of the state machine. 
For additional control +over security, you can encrypt your data using a customer-managed key for Step Functions +state machines. You can configure a symmetric KMS key and data key reuse period when +creating or updating a State Machine. The execution history and state machine definition +will be encrypted with the key applied to the State Machine. This operation is eventually +consistent. The results are best effort and may not reflect very recent updates and +changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create a +duplicate resource if it was already created. CreateStateMachine's idempotency check is +based on the state machine name, definition, type, LoggingConfiguration, +TracingConfiguration, and EncryptionConfiguration The check is also based on the publish +and versionDescription parameters. If a following request has a different roleArn or tags, +Step Functions will ignore these differences and treat it as an idempotent request of the +previous. In this case, roleArn and tags will not be updated, even if they are different. # Arguments - `definition`: The Amazon States Language definition of the state machine. See Amazon @@ -84,6 +89,7 @@ case, roleArn and tags will not be updated, even if they are different. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"encryptionConfiguration"`: Settings to configure server-side encryption. - `"loggingConfiguration"`: Defines what execution history events are logged and where they are logged. By default, the level is set to OFF. For more information see Log Levels in the Step Functions User Guide. @@ -425,6 +431,12 @@ supported by DescribeExecution unless a Map Run dispatched them. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution to describe. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call + DescribeStateMachine API with includedData = METADATA_ONLY to get a successful response + without the encrypted definition. """ function describe_execution(executionArn; aws_config::AbstractAWSConfig=global_aws_config()) return sfn( @@ -514,6 +526,15 @@ changes. about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the + API with includedData = METADATA_ONLY to get a successful response without the encrypted + definition. When calling a labelled ARN for an encrypted state machine, the includedData + = METADATA_ONLY parameter will not apply because Step Functions needs to decrypt the entire + state machine definition to get the Distributed Map state’s definition. In this case, the + API caller needs to have kms:Decrypt permission. """ function describe_state_machine( stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -598,6 +619,12 @@ supported by EXPRESS state machines. - `execution_arn`: The Amazon Resource Name (ARN) of the execution you want state machine information for. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the + API with includedData = METADATA_ONLY to get a successful response without the encrypted + definition. 
""" function describe_state_machine_for_execution( executionArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1180,7 +1207,11 @@ end send_task_failure(task_token, params::Dict{String,<:Any}) Used by activity workers, Task states using the callback pattern, and optionally Task -states using the job run pattern to report that the task identified by the taskToken failed. +states using the job run pattern to report that the task identified by the taskToken +failed. For an execution with encryption enabled, Step Functions will encrypt the error and +cause fields using the KMS key for the execution role. A caller can mark a task as fail +without using any KMS permissions in the execution role if the caller provides a null value +for both error and cause fields because no data needs to be encrypted. # Arguments - `task_token`: The token that represents this task. Task tokens are generated by Step @@ -1412,6 +1443,10 @@ configuration. This API action isn't logged in CloudTrail. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the + API with includedData = METADATA_ONLY to get a successful response without the encrypted + definition. - `"input"`: The string that contains the JSON input data for the execution, for example: \"input\": \"{\"first_name\" : \"test\"}\" If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\" Length constraints @@ -1451,7 +1486,11 @@ end stop_execution(execution_arn) stop_execution(execution_arn, params::Dict{String,<:Any}) -Stops an execution. This API action is not supported by EXPRESS state machines. +Stops an execution. This API action is not supported by EXPRESS state machines. 
For an +execution with encryption enabled, Step Functions will encrypt the error and cause fields +using the KMS key for the execution role. A caller can stop an execution without using any +KMS permissions in the execution role if the caller provides a null value for both error +and cause fields because no data needs to be encrypted. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution to stop. @@ -1686,11 +1725,12 @@ end update_state_machine(state_machine_arn) update_state_machine(state_machine_arn, params::Dict{String,<:Any}) -Updates an existing state machine by modifying its definition, roleArn, or -loggingConfiguration. Running executions will continue to use the previous definition and -roleArn. You must include at least one of definition or roleArn or you will receive a -MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map -state defined within a state machine. For example, the qualified state machine ARN +Updates an existing state machine by modifying its definition, roleArn, +loggingConfiguration, or EncryptionConfiguration. Running executions will continue to use +the previous definition and roleArn. You must include at least one of definition or roleArn +or you will receive a MissingRequiredParameter error. A qualified state machine ARN refers +to a Distributed Map state defined within a state machine. For example, the qualified state +machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state @@ -1721,6 +1761,7 @@ call UpdateStateMachine may use the previous state machine definition and roleAr Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"definition"`: The Amazon States Language definition of the state machine. 
See Amazon States Language. +- `"encryptionConfiguration"`: Settings to configure server-side encryption. - `"loggingConfiguration"`: Use the LoggingConfiguration data type to set CloudWatch Logs options. - `"publish"`: Specifies whether the state machine version is published. The default is diff --git a/src/services/ssm_quicksetup.jl b/src/services/ssm_quicksetup.jl new file mode 100644 index 0000000000..d5079c9951 --- /dev/null +++ b/src/services/ssm_quicksetup.jl @@ -0,0 +1,423 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: ssm_quicksetup +using AWS.Compat +using AWS.UUIDs + +""" + create_configuration_manager(configuration_definitions) + create_configuration_manager(configuration_definitions, params::Dict{String,<:Any}) + +Creates a Quick Setup configuration manager resource. This object is a collection of +desired state configurations for multiple configuration definitions and summaries +describing the deployments of those definitions. + +# Arguments +- `configuration_definitions`: The definition of the Quick Setup configuration that the + configuration manager deploys. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A description of the configuration manager. +- `"Name"`: A name for the configuration manager. +- `"Tags"`: Key-value pairs of metadata to assign to the configuration manager. 
+""" +function create_configuration_manager( + ConfigurationDefinitions; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "POST", + "/configurationManager", + Dict{String,Any}("ConfigurationDefinitions" => ConfigurationDefinitions); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configuration_manager( + ConfigurationDefinitions, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "POST", + "/configurationManager", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ConfigurationDefinitions" => ConfigurationDefinitions), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_configuration_manager(manager_arn) + delete_configuration_manager(manager_arn, params::Dict{String,<:Any}) + +Deletes a configuration manager. + +# Arguments +- `manager_arn`: The ID of the configuration manager. + +""" +function delete_configuration_manager( + ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "DELETE", + "/configurationManager/$(ManagerArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configuration_manager( + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "DELETE", + "/configurationManager/$(ManagerArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configuration_manager(manager_arn) + get_configuration_manager(manager_arn, params::Dict{String,<:Any}) + +Returns a configuration manager. + +# Arguments +- `manager_arn`: The ARN of the configuration manager. 
+ +""" +function get_configuration_manager( + ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/configurationManager/$(ManagerArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configuration_manager( + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "GET", + "/configurationManager/$(ManagerArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_service_settings() + get_service_settings(params::Dict{String,<:Any}) + +Returns settings configured for Quick Setup in the requesting Amazon Web Services account +and Amazon Web Services Region. + +""" +function get_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "GET", "/serviceSettings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_service_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/serviceSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_configuration_managers() + list_configuration_managers(params::Dict{String,<:Any}) + +Returns Quick Setup configuration managers. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters the results returned by the request. +- `"MaxItems"`: Specifies the maximum number of configuration managers that are returned by + the request. +- `"StartingToken"`: The token to use when requesting a specific set of items from a list. 
+""" +function list_configuration_managers(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "POST", + "/listConfigurationManagers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_configuration_managers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "POST", + "/listConfigurationManagers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_quick_setup_types() + list_quick_setup_types(params::Dict{String,<:Any}) + +Returns the available Quick Setup types. + +""" +function list_quick_setup_types(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "GET", + "/listQuickSetupTypes"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_quick_setup_types( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/listQuickSetupTypes", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns tags assigned to the resource. + +# Arguments +- `resource_arn`: The ARN of the resource the tag is assigned to. 
+ +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/tags/$(ResourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "GET", + "/tags/$(ResourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns key-value pairs of metadata to Amazon Web Services resources. + +# Arguments +- `resource_arn`: The ARN of the resource to tag. +- `tags`: Key-value pairs of metadata to assign to the resource. + +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "PUT", + "/tags/$(ResourceArn)", + Dict{String,Any}("Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "PUT", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Tags" => Tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from the specified resource. + +# Arguments +- `resource_arn`: The ARN of the resource to remove tags from. +- `tag_keys`: The keys of the tags to remove from the resource. 
+ +""" +function untag_resource( + ResourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_configuration_definition(id, manager_arn) + update_configuration_definition(id, manager_arn, params::Dict{String,<:Any}) + +Updates a Quick Setup configuration definition. + +# Arguments +- `id`: The ID of the configuration definition you want to update. +- `manager_arn`: The ARN of the configuration manager associated with the definition to + update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LocalDeploymentAdministrationRoleArn"`: The ARN of the IAM role used to administrate + local configuration deployments. +- `"LocalDeploymentExecutionRoleName"`: The name of the IAM role used to deploy local + configurations. +- `"Parameters"`: The parameters for the configuration definition type. +- `"TypeVersion"`: The version of the Quick Setup type to use. 
+""" +function update_configuration_definition( + Id, ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "PUT", + "/configurationDefinition/$(ManagerArn)/$(Id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configuration_definition( + Id, + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "PUT", + "/configurationDefinition/$(ManagerArn)/$(Id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_configuration_manager(manager_arn) + update_configuration_manager(manager_arn, params::Dict{String,<:Any}) + +Updates a Quick Setup configuration manager. + +# Arguments +- `manager_arn`: The ARN of the configuration manager. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A description of the configuration manager. +- `"Name"`: A name for the configuration manager. +""" +function update_configuration_manager( + ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "PUT", + "/configurationManager/$(ManagerArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configuration_manager( + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "PUT", + "/configurationManager/$(ManagerArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_service_settings() + update_service_settings(params::Dict{String,<:Any}) + +Updates settings configured for Quick Setup. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExplorerEnablingRoleArn"`: The IAM role used to enable Explorer. 
+""" +function update_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "PUT", "/serviceSettings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_service_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "PUT", + "/serviceSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/timestream_query.jl b/src/services/timestream_query.jl index 1ed4086d20..ab7191e474 100644 --- a/src/services/timestream_query.jl +++ b/src/services/timestream_query.jl @@ -620,7 +620,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys The maximum value supported for MaxQueryTCU is 1000. To request an increase to this soft limit, contact Amazon Web Services Support. For information about the default quota for maxQueryTCU, see Default quotas. -- `"QueryPricingModel"`: The pricing model for queries in an account. +- `"QueryPricingModel"`: The pricing model for queries in an account. The + QueryPricingModel parameter is used by several Timestream operations; however, the + UpdateAccountSettings API operation doesn't recognize any values other than COMPUTE_UNITS. """ function update_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) return timestream_query( diff --git a/src/services/tnb.jl b/src/services/tnb.jl index 05df993e3a..f436b0210a 100644 --- a/src/services/tnb.jl +++ b/src/services/tnb.jl @@ -291,7 +291,7 @@ end get_sol_function_instance(vnf_instance_id) get_sol_function_instance(vnf_instance_id, params::Dict{String,<:Any}) -Gets the details of a network function instance, including the instantation state and +Gets the details of a network function instance, including the instantiation state and metadata from the function package descriptor in the network function package. 
A network function instance is a function in a function package . @@ -665,9 +665,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"tags"`: A tag is a label that you assign to an Amazon Web Services resource. Each tag - consists of a key and an optional value. When you use this API, the tags are transferred to - the network operation that is created. Use tags to search and filter your resources or - track your Amazon Web Services costs. + consists of a key and an optional value. When you use this API, the tags are only applied + to the network operation that is created. These tags are not applied to the network + instance. Use tags to search and filter your resources or track your Amazon Web Services + costs. """ function instantiate_sol_network_instance( nsInstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -804,6 +805,8 @@ as network instance instantiation or termination. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"max_results"`: The maximum number of results to include in the response. - `"nextpage_opaque_marker"`: The token for the next page of results. +- `"nsInstanceId"`: Network instance id filter, to retrieve network operations associated + to a network instance. """ function list_sol_network_operations(; aws_config::AbstractAWSConfig=global_aws_config()) return tnb( @@ -1031,9 +1034,10 @@ delete it. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"tags"`: A tag is a label that you assign to an Amazon Web Services resource. Each tag - consists of a key and an optional value. When you use this API, the tags are transferred to - the network operation that is created. 
Use tags to search and filter your resources or - track your Amazon Web Services costs. + consists of a key and an optional value. When you use this API, the tags are only applied + to the network operation that is created. These tags are not applied to the network + instance. Use tags to search and filter your resources or track your Amazon Web Services + costs. """ function terminate_sol_network_instance( nsInstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1148,20 +1152,28 @@ end Update a network instance. A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, -update, and delete) can be performed. +update, and delete) can be performed. Choose the updateType parameter to target the +necessary update of the network instance. # Arguments - `ns_instance_id`: ID of the network instance. -- `update_type`: The type of update. +- `update_type`: The type of update. Use the MODIFY_VNF_INFORMATION update type, to + update a specific network function configuration, in the network instance. Use the + UPDATE_NS update type, to update the network instance to a new network service descriptor. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"modifyVnfInfoData"`: Identifies the network function information parameters and/or the - configurable properties of the network function to be modified. + configurable properties of the network function to be modified. Include this property only + if the update type is MODIFY_VNF_INFORMATION. - `"tags"`: A tag is a label that you assign to an Amazon Web Services resource. Each tag - consists of a key and an optional value. When you use this API, the tags are transferred to - the network operation that is created. Use tags to search and filter your resources or - track your Amazon Web Services costs. + consists of a key and an optional value. 
When you use this API, the tags are only applied + to the network operation that is created. These tags are not applied to the network + instance. Use tags to search and filter your resources or track your Amazon Web Services + costs. +- `"updateNs"`: Identifies the network service descriptor and the configurable properties + of the descriptor, to be used for the update. Include this property only if the update type + is UPDATE_NS. """ function update_sol_network_instance( nsInstanceId, updateType; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/workspaces.jl b/src/services/workspaces.jl index 2d5b341c5f..e569e7b2a7 100644 --- a/src/services/workspaces.jl +++ b/src/services/workspaces.jl @@ -727,7 +727,9 @@ WorkSpaces are created. The MANUAL running mode value is only supported by Am WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled -WorkSpaces are only supported by Amazon WorkSpaces Core. +WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to +ensure you are using one that is optimal for your needs and budget. For more information on +switching running modes, see Can I switch between hourly and monthly billing? # Arguments - `workspaces`: The WorkSpaces to create. You can specify up to 25 WorkSpaces. @@ -756,6 +758,75 @@ function create_workspaces( ) end +""" + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name) + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name, params::Dict{String,<:Any}) + +Creates a pool of WorkSpaces. + +# Arguments +- `bundle_id`: The identifier of the bundle for the pool. +- `capacity`: The user capacity of the pool. +- `description`: The pool description. 
+- `directory_id`: The identifier of the directory for the pool. +- `pool_name`: The name of the pool. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: Indicates the application settings of the pool. +- `"Tags"`: The tags for the pool. +- `"TimeoutSettings"`: Indicates the timeout settings of the pool. +""" +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_account_link_invitation(link_id) delete_account_link_invitation(link_id, params::Dict{String,<:Any}) @@ -1698,6 +1769,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Limit"`: The maximum number of directories to return. - `"NextToken"`: If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. +- `"WorkspaceDirectoryNames"`: The names of the WorkSpace directories. 
""" function describe_workspace_directories(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( @@ -1890,6 +1962,73 @@ function describe_workspaces_connection_status( ) end +""" + describe_workspaces_pool_sessions(pool_id) + describe_workspaces_pool_sessions(pool_id, params::Dict{String,<:Any}) + +Retrieves a list that describes the streaming sessions for a specified pool. + +# Arguments +- `pool_id`: The identifier of the pool. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"UserId"`: The identifier of the user. +""" +function describe_workspaces_pool_sessions( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workspaces_pool_sessions( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_workspaces_pools() + describe_workspaces_pools(params::Dict{String,<:Any}) + +Describes the specified WorkSpaces Pools. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: The filter conditions for the WorkSpaces Pool to return. +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. 
+- `"PoolIds"`: The identifier of the WorkSpaces Pools. +""" +function describe_workspaces_pools(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "DescribeWorkspacesPools"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_workspaces_pools( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPools", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_connection_alias(alias_id) disassociate_connection_alias(alias_id, params::Dict{String,<:Any}) @@ -2501,6 +2640,44 @@ function modify_selfservice_permissions( ) end +""" + modify_streaming_properties(resource_id) + modify_streaming_properties(resource_id, params::Dict{String,<:Any}) + +Modifies the specified streaming properties. + +# Arguments +- `resource_id`: The identifier of the resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"StreamingProperties"`: The streaming properties to configure. 
+""" +function modify_streaming_properties( + ResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}("ResourceId" => ResourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_streaming_properties( + ResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceId" => ResourceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_workspace_access_properties(resource_id, workspace_access_properties) modify_workspace_access_properties(resource_id, workspace_access_properties, params::Dict{String,<:Any}) @@ -2775,29 +2952,28 @@ function rebuild_workspaces( end """ - register_workspace_directory(directory_id, enable_work_docs) - register_workspace_directory(directory_id, enable_work_docs, params::Dict{String,<:Any}) + register_workspace_directory() + register_workspace_directory(params::Dict{String,<:Any}) Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role. -# Arguments -- `directory_id`: The identifier of the directory. You cannot register a directory if it +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ActiveDirectoryConfig"`: The active directory config of the directory. +- `"DirectoryId"`: The identifier of the directory. You cannot register a directory if it does not have a status of Active. 
If the directory does not have a status of Active, you will receive an InvalidResourceStateException error. If you have already registered the maximum number of directories that you can register with Amazon WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister directories that you are not using for WorkSpaces, and try again. -- `enable_work_docs`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have - enabled this parameter and WorkDocs is not available in the Region, you will receive an - OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EnableSelfService"`: Indicates whether self-service capabilities are enabled or disabled. +- `"EnableWorkDocs"`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have + enabled this parameter and WorkDocs is not available in the Region, you will receive an + OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - `"SubnetIds"`: The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an @@ -2808,34 +2984,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Web Services account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images. +- `"UserIdentityType"`: The type of identity management the user is using. +- `"WorkspaceDirectoryDescription"`: Description of the directory to register. +- `"WorkspaceDirectoryName"`: The name of the directory to register. 
+- `"WorkspaceType"`: Indicates whether the directory's WorkSpace type is personal or pools. """ -function register_workspace_directory( - DirectoryId, EnableWorkDocs; aws_config::AbstractAWSConfig=global_aws_config() -) +function register_workspace_directory(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( - "RegisterWorkspaceDirectory", - Dict{String,Any}("DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "RegisterWorkspaceDirectory"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function register_workspace_directory( - DirectoryId, - EnableWorkDocs, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return workspaces( "RegisterWorkspaceDirectory", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs - ), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2993,6 +3157,36 @@ function start_workspaces( ) end +""" + start_workspaces_pool(pool_id) + start_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Starts the specified pool. You cannot start a pool unless it has a running mode of AutoStop +and a state of STOPPED. + +# Arguments +- `pool_id`: The identifier of the pool. 
+ +""" +function start_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_workspaces(stop_workspace_requests) stop_workspaces(stop_workspace_requests, params::Dict{String,<:Any}) @@ -3033,6 +3227,36 @@ function stop_workspaces( ) end +""" + stop_workspaces_pool(pool_id) + stop_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Stops the specified pool. You cannot stop a WorkSpace pool unless it has a running mode of +AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function stop_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ terminate_workspaces(terminate_workspace_requests) terminate_workspaces(terminate_workspace_requests, params::Dict{String,<:Any}) @@ -3090,6 +3314,72 @@ function terminate_workspaces( ) end +""" + terminate_workspaces_pool(pool_id) + terminate_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Terminates the specified pool. 
+ +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function terminate_workspaces_pool( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + terminate_workspaces_pool_session(session_id) + terminate_workspaces_pool_session(session_id, params::Dict{String,<:Any}) + +Terminates the pool session. + +# Arguments +- `session_id`: The identifier of the pool session. + +""" +function terminate_workspaces_pool_session( + SessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}("SessionId" => SessionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool_session( + SessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SessionId" => SessionId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_connect_client_add_in(add_in_id, resource_id) update_connect_client_add_in(add_in_id, resource_id, params::Dict{String,<:Any}) @@ -3334,3 +3624,40 @@ function update_workspace_image_permission( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_workspaces_pool(pool_id) + update_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Updates the specified pool. 
+ +# Arguments +- `pool_id`: The identifier of the specified pool to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: The persistent application settings for users in the pool. +- `"BundleId"`: The identifier of the bundle. +- `"Capacity"`: The desired capacity for the pool. +- `"Description"`: Describes the specified pool to update. +- `"DirectoryId"`: The identifier of the directory. +- `"TimeoutSettings"`: Indicates the timeout settings of the specified pool. +""" +function update_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end