From 3266ffc736978af37b6bc13333e6598789dc18dd Mon Sep 17 00:00:00 2001 From: mattBrzezinski Date: Tue, 20 Jun 2023 06:15:02 +0000 Subject: [PATCH] AWS API Definitions Updated --- src/services/application_discovery_service.jl | 85 ++- src/services/auditmanager.jl | 106 ++- src/services/cloudformation.jl | 19 +- src/services/connect.jl | 14 +- src/services/ec2.jl | 20 +- src/services/ecs.jl | 10 +- src/services/efs.jl | 17 +- src/services/guardduty.jl | 64 +- src/services/iam.jl | 15 +- src/services/location.jl | 48 +- src/services/route_53_domains.jl | 74 +- src/services/s3.jl | 641 +++++++++--------- src/services/sagemaker.jl | 61 +- 13 files changed, 660 insertions(+), 514 deletions(-) diff --git a/src/services/application_discovery_service.jl b/src/services/application_discovery_service.jl index a4ce003ec0..cabc439239 100644 --- a/src/services/application_discovery_service.jl +++ b/src/services/application_discovery_service.jl @@ -255,18 +255,18 @@ end describe_agents() describe_agents(params::Dict{String,<:Any}) -Lists agents or connectors as specified by ID or other filters. All agents/connectors -associated with your user account can be listed if you call DescribeAgents as is without -passing any parameters. +Lists agents or collectors as specified by ID or other filters. All agents/collectors +associated with your user can be listed if you call DescribeAgents as is without passing +any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"agentIds"`: The agent or the Connector IDs for which you want information. If you - specify no IDs, the system returns information about all agents/Connectors associated with - your Amazon Web Services user account. +- `"agentIds"`: The agent or the collector IDs for which you want information. If you + specify no IDs, the system returns information about all agents/collectors associated with + your user. - `"filters"`: You can filter the request using various logical operators and a key-value format. For example: {\"key\": \"collectionStatus\", \"value\": \"STARTED\"} -- `"maxResults"`: The total number of agents/Connectors to return in a single page of +- `"maxResults"`: The total number of agents/collectors to return in a single page of output. The maximum value is 100. - `"nextToken"`: Token to retrieve the next set of results. For example, if you previously specified 100 IDs for DescribeAgentsRequestagentIds but set DescribeAgentsRequestmaxResults @@ -333,8 +333,8 @@ end describe_continuous_exports() describe_continuous_exports(params::Dict{String,<:Any}) -Lists exports as specified by ID. All continuous exports associated with your user account -can be listed if you call DescribeContinuousExports as is without passing any parameters. +Lists exports as specified by ID. All continuous exports associated with your user can be +listed if you call DescribeContinuousExports as is without passing any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -466,8 +466,8 @@ end Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters. There are three valid tag filter names: tagKey tagValue configurationId Also, all configuration items associated -with your user account that have tags can be listed if you call DescribeTags as is without -passing any parameters. 
+with your user that have tags can be listed if you call DescribeTags as is without passing +any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -722,16 +722,15 @@ end start_data_collection_by_agent_ids(agent_ids) start_data_collection_by_agent_ids(agent_ids, params::Dict{String,<:Any}) -Instructs the specified agents or connectors to start collecting data. +Instructs the specified agents to start collecting data. # Arguments -- `agent_ids`: The IDs of the agents or connectors from which to start collecting data. If - you send a request to an agent/connector ID that you do not have permission to contact, - according to your Amazon Web Services account, the service does not throw an exception. - Instead, it returns the error in the Description field. If you send a request to multiple - agents/connectors and you do not have permission to contact some of those - agents/connectors, the system does not throw an exception. Instead, the system shows Failed - in the Description field. +- `agent_ids`: The IDs of the agents from which to start collecting data. If you send a + request to an agent ID that you do not have permission to contact, according to your Amazon + Web Services account, the service does not throw an exception. Instead, it returns the + error in the Description field. If you send a request to multiple agents and you do not + have permission to contact some of those agents, the system does not throw an exception. + Instead, the system shows Failed in the Description field. """ function start_data_collection_by_agent_ids( @@ -763,14 +762,22 @@ end start_export_task() start_export_task(params::Dict{String,<:Any}) - Begins the export of discovered data to an S3 bucket. If you specify agentIds in a -filter, the task exports up to 72 hours of detailed data collected by the identified -Application Discovery Agent, including network, process, and performance details. A time -range for exported agent data may be set by using startTime and endTime. Export of detailed -agent data is limited to five concurrently running exports. If you do not include an -agentIds filter, summary data is exported that includes both Amazon Web Services Agentless -Discovery Connector data and summary data from Amazon Web Services Discovery Agents. Export -of summary data is limited to two exports per day. +Begins the export of a discovered data report to an Amazon S3 bucket managed by Amazon Web +Services. Exports might provide an estimate of fees and savings based on certain +information that you provide. Fee estimates do not include any taxes that might apply. Your +actual fees and savings depend on a variety of factors, including your actual usage of +Amazon Web Services services, which might vary from the estimates provided in this report. +If you do not specify preferences or agentIds in the filter, a summary of all servers, +applications, tags, and performance is generated. This data is an aggregation of all server +data collected through on-premises tooling, file import, application grouping and applying +tags. If you specify agentIds in a filter, the task exports up to 72 hours of detailed data +collected by the identified Application Discovery Agent, including network, process, and +performance details. A time range for exported agent data may be set by using startTime and +endTime. Export of detailed agent data is limited to five concurrently running exports. +Export of detailed agent data is limited to two exports per day. 
If you enable +ec2RecommendationsPreferences in preferences , an Amazon EC2 instance matching the +characteristics of each server in Application Discovery Service is generated. Changing the +attributes of the ec2RecommendationsPreferences changes the criteria of the recommendation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -782,8 +789,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and - exported data includes both Agentless Discovery Connector data and summary data from - Application Discovery agents. + exported data includes both Amazon Web Services Application Discovery Service Agentless + Collector collectors data and summary data from Application Discovery Agent agents. +- `"preferences"`: Indicates the type of data that needs to be exported. Only one + ExportPreferences can be enabled at any time. - `"startTime"`: The start timestamp for exported data from the single Application Discovery Agent selected in the filters. If no value is specified, data is exported starting from the first data collected by the agent. @@ -806,12 +815,14 @@ end start_import_task(import_url, name, params::Dict{String,<:Any}) Starts an import task, which allows you to import details of your on-premises environment -directly into Amazon Web Services Migration Hub without having to use the Application -Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This -gives you the option to perform migration assessment and planning directly from your -imported data, including the ability to group your devices as applications and track their -migration status. To start an import request, do this: Download the specially formatted -comma separated value (CSV) import template, which you can find here: +directly into Amazon Web Services Migration Hub without having to use the Amazon Web +Services Application Discovery Service (Application Discovery Service) tools such as the +Amazon Web Services Application Discovery Service Agentless Collector or Application +Discovery Agent. This gives you the option to perform migration assessment and planning +directly from your imported data, including the ability to group your devices as +applications and track their migration status. To start an import request, do this: +Download the specially formatted comma separated value (CSV) import template, which you can +find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_tem plate.csv. Fill out the template with your server and application data. Upload your import file to an Amazon S3 bucket, and make a note of it's Object URL. Your import file @@ -915,10 +926,10 @@ end stop_data_collection_by_agent_ids(agent_ids) stop_data_collection_by_agent_ids(agent_ids, params::Dict{String,<:Any}) -Instructs the specified agents or connectors to stop collecting data. +Instructs the specified agents to stop collecting data. # Arguments -- `agent_ids`: The IDs of the agents or connectors from which to stop collecting data. +- `agent_ids`: The IDs of the agents from which to stop collecting data. 
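A minimal usage sketch for the data-collection calls documented above, assuming the generated Application Discovery Service module from this package is in scope (for example via AWS.jl's `@service` macro); the agent IDs are placeholders:

    agent_ids = ["agent-id-1", "agent-id-2"]  # hypothetical agent IDs
    start_data_collection_by_agent_ids(agent_ids)
    # Permission failures are reported per agent in the response's Description
    # field ("Failed") rather than thrown as exceptions.
    stop_data_collection_by_agent_ids(agent_ids)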
""" function stop_data_collection_by_agent_ids( diff --git a/src/services/auditmanager.jl b/src/services/auditmanager.jl index 6b33ae45a8..c24660a730 100644 --- a/src/services/auditmanager.jl +++ b/src/services/auditmanager.jl @@ -238,15 +238,15 @@ end batch_import_evidence_to_assessment_control(assessment_id, control_id, control_set_id, manual_evidence) batch_import_evidence_to_assessment_control(assessment_id, control_id, control_set_id, manual_evidence, params::Dict{String,<:Any}) -Uploads one or more pieces of evidence to a control in an Audit Manager assessment. You can -upload manual evidence from any Amazon Simple Storage Service (Amazon S3) bucket by -specifying the S3 URI of the evidence. You must upload manual evidence to your S3 bucket -before you can upload it to your assessment. For instructions, see CreateBucket and -PutObject in the Amazon Simple Storage Service API Reference. The following restrictions -apply to this action: Maximum size of an individual evidence file: 100 MB Number of -daily manual evidence uploads per control: 100 Supported file formats: See Supported file -types for manual evidence in the Audit Manager User Guide For more information about -Audit Manager service restrictions, see Quotas and restrictions for Audit Manager. +Adds one or more pieces of evidence to a control in an Audit Manager assessment. You can +import manual evidence from any S3 bucket by specifying the S3 URI of the object. You can +also upload a file from your browser, or enter plain text in response to a risk assessment +question. The following restrictions apply to this action: manualEvidence can be only +one of the following: evidenceFileName, s3ResourcePath, or textResponse Maximum size of +an individual evidence file: 100 MB Number of daily manual evidence uploads per control: +100 Supported file formats: See Supported file types for manual evidence in the Audit +Manager User Guide For more information about Audit Manager service restrictions, see +Quotas and restrictions for Audit Manager. # Arguments - `assessment_id`: The identifier for the assessment. @@ -671,7 +671,11 @@ end delete_control(control_id) delete_control(control_id, params::Dict{String,<:Any}) - Deletes a custom control in Audit Manager. + Deletes a custom control in Audit Manager. When you invoke this operation, the custom +control is deleted from any frameworks or assessments that it’s currently part of. As a +result, Audit Manager will stop collecting evidence for that custom control in all of your +assessments. This includes assessments that you previously created before you deleted the +custom control. # Arguments - `control_id`: The unique identifier for the control. @@ -838,7 +842,7 @@ end get_account_status() get_account_status(params::Dict{String,<:Any}) - Returns the registration status of an account in Audit Manager. + Gets the registration status of an account in Audit Manager. """ function get_account_status(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -862,7 +866,7 @@ end get_assessment(assessment_id) get_assessment(assessment_id, params::Dict{String,<:Any}) -Returns an assessment from Audit Manager. +Gets information about a specified assessment. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -894,7 +898,7 @@ end get_assessment_framework(framework_id) get_assessment_framework(framework_id, params::Dict{String,<:Any}) -Returns a framework from Audit Manager. +Gets information about a specified framework. 
# Arguments - `framework_id`: The identifier for the framework. @@ -928,7 +932,7 @@ end get_assessment_report_url(assessment_id, assessment_report_id) get_assessment_report_url(assessment_id, assessment_report_id, params::Dict{String,<:Any}) - Returns the URL of an assessment report in Audit Manager. + Gets the URL of an assessment report in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -964,7 +968,7 @@ end get_change_logs(assessment_id) get_change_logs(assessment_id, params::Dict{String,<:Any}) - Returns a list of changelogs from Audit Manager. + Gets a list of changelogs from Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1003,7 +1007,7 @@ end get_control(control_id) get_control(control_id, params::Dict{String,<:Any}) - Returns a control from Audit Manager. + Gets information about a specified control. # Arguments - `control_id`: The identifier for the control. @@ -1035,7 +1039,7 @@ end get_delegations() get_delegations(params::Dict{String,<:Any}) - Returns a list of delegations from an audit owner to a delegate. + Gets a list of delegations from an audit owner to a delegate. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1064,7 +1068,7 @@ end get_evidence(assessment_id, control_set_id, evidence_folder_id, evidence_id) get_evidence(assessment_id, control_set_id, evidence_folder_id, evidence_id, params::Dict{String,<:Any}) - Returns evidence from Audit Manager. + Gets information about a specified evidence item. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1109,7 +1113,7 @@ end get_evidence_by_evidence_folder(assessment_id, control_set_id, evidence_folder_id) get_evidence_by_evidence_folder(assessment_id, control_set_id, evidence_folder_id, params::Dict{String,<:Any}) - Returns all evidence from a specified evidence folder in Audit Manager. + Gets all evidence from a specified evidence folder in Audit Manager. # Arguments - `assessment_id`: The identifier for the assessment. @@ -1152,11 +1156,55 @@ function get_evidence_by_evidence_folder( ) end +""" + get_evidence_file_upload_url(file_name) + get_evidence_file_upload_url(file_name, params::Dict{String,<:Any}) + +Creates a presigned Amazon S3 URL that can be used to upload a file as manual evidence. For +instructions on how to use this operation, see Upload a file from your browser in the +Audit Manager User Guide. The following restrictions apply to this operation: Maximum +size of an individual evidence file: 100 MB Number of daily manual evidence uploads per +control: 100 Supported file formats: See Supported file types for manual evidence in the +Audit Manager User Guide For more information about Audit Manager service restrictions, +see Quotas and restrictions for Audit Manager. + +# Arguments +- `file_name`: The file that you want to upload. For a list of supported file formats, see + Supported file types for manual evidence in the Audit Manager User Guide. 
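A sketch of the manual-evidence upload flow this new operation enables, assuming the generated Audit Manager module is in scope and HTTP.jl is available; the uploadUrl and evidenceFileName response fields follow the operation's documented output and should be treated as assumptions:

    using HTTP

    resp = get_evidence_file_upload_url("risk-assessment.pdf")
    # PUT the file body to the presigned URL returned by the service.
    HTTP.put(resp["uploadUrl"], [], read("risk-assessment.pdf"))
    # resp["evidenceFileName"] can then be cited as manualEvidence when calling
    # batch_import_evidence_to_assessment_control.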
+ +""" +function get_evidence_file_upload_url( + fileName; aws_config::AbstractAWSConfig=global_aws_config() +) + return auditmanager( + "GET", + "/evidenceFileUploadUrl", + Dict{String,Any}("fileName" => fileName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_evidence_file_upload_url( + fileName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return auditmanager( + "GET", + "/evidenceFileUploadUrl", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("fileName" => fileName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_evidence_folder(assessment_id, control_set_id, evidence_folder_id) get_evidence_folder(assessment_id, control_set_id, evidence_folder_id, params::Dict{String,<:Any}) - Returns an evidence folder from the specified assessment in Audit Manager. + Gets an evidence folder from a specified assessment in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1198,7 +1246,7 @@ end get_evidence_folders_by_assessment(assessment_id) get_evidence_folders_by_assessment(assessment_id, params::Dict{String,<:Any}) - Returns the evidence folders from a specified assessment in Audit Manager. + Gets the evidence folders from a specified assessment in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1237,8 +1285,8 @@ end get_evidence_folders_by_assessment_control(assessment_id, control_id, control_set_id) get_evidence_folders_by_assessment_control(assessment_id, control_id, control_set_id, params::Dict{String,<:Any}) - Returns a list of evidence folders that are associated with a specified control in an -Audit Manager assessment. + Gets a list of evidence folders that are associated with a specified control in an Audit +Manager assessment. # Arguments - `assessment_id`: The identifier for the assessment. @@ -1335,7 +1383,7 @@ end get_organization_admin_account() get_organization_admin_account(params::Dict{String,<:Any}) - Returns the name of the delegated Amazon Web Services administrator account for the + Gets the name of the delegated Amazon Web Services administrator account for a specified organization. """ @@ -1363,7 +1411,7 @@ end get_services_in_scope() get_services_in_scope(params::Dict{String,<:Any}) -Returns a list of all of the Amazon Web Services that you can choose to include in your +Gets a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope. @@ -1385,7 +1433,7 @@ end get_settings(attribute) get_settings(attribute, params::Dict{String,<:Any}) - Returns the settings for the specified Amazon Web Services account. + Gets the settings for a specified Amazon Web Services account. # Arguments - `attribute`: The list of setting attribute enum values. @@ -2435,8 +2483,10 @@ end # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"defaultAssessmentReportsDestination"`: The default storage destination for assessment - reports. +- `"defaultAssessmentReportsDestination"`: The default S3 destination bucket for storing + assessment reports. +- `"defaultExportDestination"`: The default S3 destination bucket for storing evidence + finder exports. - `"defaultProcessOwners"`: A list of the default audit owners. 
- `"deregistrationPolicy"`: The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager. diff --git a/src/services/cloudformation.jl b/src/services/cloudformation.jl index 169183f478..d6b59bdc13 100644 --- a/src/services/cloudformation.jl +++ b/src/services/cloudformation.jl @@ -342,6 +342,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NotificationARNs"`: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. +- `"OnStackFailure"`: Determines what action will be taken if stack creation fails. If this + parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation + must not be specified. This must be one of these values: DELETE - Deletes the change set + if the stack creation fails. This is only valid when the ChangeSetType parameter is set to + CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED. + DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true + for the DisableRollback parameter to the ExecuteChangeSet API operation. ROLLBACK - if + the stack creation fails, roll back the stack. This is equivalent to specifying false for + the DisableRollback parameter to the ExecuteChangeSet API operation. For nested stacks, + when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, + any failure in a child stack will cause the parent stack creation to fail and all stacks to + be deleted. - `"Parameters"`: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type. - `"ResourceTypes"`: The template resource types that you have permissions to work with if @@ -2044,7 +2056,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that CloudFormation successfully received them. - `"DisableRollback"`: Preserves the state of previously provisioned resources when an - operation fails. Default: True + operation fails. This parameter can't be specified when the OnStackFailure parameter to the + CreateChangeSet API operation was specified. True - if the stack creation fails, do + nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to + the CreateChangeSet API operation. False - if the stack creation fails, roll back the + stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the + CreateChangeSet API operation. Default: True - `"StackName"`: If you specified the name of a change set, specify the stack name or Amazon Resource Name (ARN) that's associated with the change set you want to execute. """ diff --git a/src/services/connect.jl b/src/services/connect.jl index 1af51b9c73..437dc39476 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -6046,8 +6046,8 @@ end resume_contact_recording(contact_id, initial_contact_id, instance_id, params::Dict{String,<:Any}) When a contact is being recorded, and the recording has been suspended using -SuspendContactRecording, this API resumes recording the call. Only voice recordings are -supported at this time. 
+SuspendContactRecording, this API resumes recording the call or screen. Voice and screen +recordings are supported. # Arguments - `contact_id`: The identifier of the contact. @@ -7222,11 +7222,11 @@ end suspend_contact_recording(contact_id, initial_contact_id, instance_id) suspend_contact_recording(contact_id, initial_contact_id, instance_id, params::Dict{String,<:Any}) -When a contact is being recorded, this API suspends recording the call. For example, you -might suspend the call recording while collecting sensitive information, such as a credit -card number. Then use ResumeContactRecording to restart recording. The period of time that -the recording is suspended is filled with silence in the final recording. Only voice -recordings are supported at this time. +When a contact is being recorded, this API suspends recording the call or screen. For +example, you might suspend the call or screen recording while collecting sensitive +information, such as a credit card number. Then use ResumeContactRecording to restart +recording. The period of time that the recording is suspended is filled with silence in the +final recording. Voice and screen recordings are supported. # Arguments - `contact_id`: The identifier of the contact. diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 2cd4b2809a..f47b20456e 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -11581,18 +11581,14 @@ end describe_account_attributes(params::Dict{String,<:Any}) Describes attributes of your Amazon Web Services account. The following are the supported -account attributes: supported-platforms: Indicates whether your account can launch -instances into EC2-Classic and EC2-VPC, or only into EC2-VPC. default-vpc: The ID of the -default VPC for your account, or none. max-instances: This attribute is no longer -supported. The returned value does not reflect your actual vCPU limit for running On-Demand -Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic -Compute Cloud User Guide. vpc-max-security-groups-per-interface: The maximum number of -security groups that you can assign to a network interface. max-elastic-ips: The maximum -number of Elastic IP addresses that you can allocate for use with EC2-Classic. -vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for -use with EC2-VPC. We are retiring EC2-Classic on August 15, 2022. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon EC2 User Guide. +account attributes: default-vpc: The ID of the default VPC for your account, or none. +max-instances: This attribute is no longer supported. The returned value does not reflect +your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand +Instance Limits in the Amazon Elastic Compute Cloud User Guide. max-elastic-ips: The +maximum number of Elastic IP addresses that you can allocate. supported-platforms: This +attribute is deprecated. vpc-max-elastic-ips: The maximum number of Elastic IP addresses +that you can allocate. vpc-max-security-groups-per-interface: The maximum number of +security groups that you can assign to a network interface. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
diff --git a/src/services/ecs.jl b/src/services/ecs.jl
index 63128dacb9..73f329f106 100644
--- a/src/services/ecs.jl
+++ b/src/services/ecs.jl
@@ -244,7 +244,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   isn't specified. If schedulingStrategy is DAEMON then this isn't required.
 - `"enableECSManagedTags"`: Specifies whether to turn on Amazon ECS managed tags for the
   tasks within the service. For more information, see Tagging your Amazon ECS resources in
-  the Amazon Elastic Container Service Developer Guide.
+  the Amazon Elastic Container Service Developer Guide. When you use Amazon ECS managed tags,
+  you need to set the propagateTags request parameter.
 - `"enableExecuteCommand"`: Determines whether the execute command functionality is turned
   on for the service. If true, this enables execute command functionality on all containers
   in the service tasks.
@@ -318,7 +319,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"propagateTags"`: Specifies whether to propagate the tags from the task definition to
   the task. If no value is specified, the tags aren't propagated. Tags can only be propagated
   to the task during task creation. To add tags to a task after task creation, use the
-  TagResource API action.
+  TagResource API action. The default is NONE.
 - `"role"`: The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon
   ECS to make calls to your load balancer on your behalf. This parameter is only permitted if
   you are using a load balancer with your service and your task definition doesn't use the
@@ -729,6 +730,11 @@ count. You can't use a DELETE_IN_PROGRESS task definition revision to run new ta
 create new services. You also can't update an existing service to reference a
 DELETE_IN_PROGRESS task definition revision. A task definition revision will stay in
 DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.
+When you delete all INACTIVE task definition revisions, the task definition name is not
+displayed in the console and not returned in the API. If any task definition revisions are
+in the DELETE_IN_PROGRESS state, the task definition name is displayed in the console and
+returned in the API. The task definition name is retained by Amazon ECS and the revision is
+incremented the next time you create a task definition with that name.

 # Arguments
 - `task_definitions`: The family and revision (family:revision) or full Amazon Resource
diff --git a/src/services/efs.jl b/src/services/efs.jl
index 53fd04f413..c11aa06434 100644
--- a/src/services/efs.jl
+++ b/src/services/efs.jl
@@ -16,10 +16,14 @@ exposed as the access point's root directory. Applications using the access poin
 access data in the application's own directory and any subdirectories. To learn more, see
 Mounting a file system using EFS access points. If multiple requests to create access
 points on the same file system are sent in quick succession, and the file system is near
-the limit of 1000 access points, you may experience a throttling response for these
+the limit of 1,000 access points, you may experience a throttling response for these
 requests. This is to ensure that the file system does not exceed the stated access point
 limit. This operation requires permissions for the elasticfilesystem:CreateAccessPoint
-action.
+action. Access points can be tagged on creation.
If tags are specified in the creation +action, IAM performs additional authorization on the elasticfilesystem:TagResource action +to verify if users have permissions to create tags. Therefore, you must grant explicit +permissions to use the elasticfilesystem:TagResource action. For more information, see +Granting permissions to tag resources during creation. # Arguments - `client_token`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure @@ -110,8 +114,13 @@ the file system using the ThroughputMode parameter. After the file system is ful Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instances in your VPC by using the mount -target. For more information, see Amazon EFS: How it Works. This operation requires -permissions for the elasticfilesystem:CreateFileSystem action. +target. For more information, see Amazon EFS: How it Works. This operation requires +permissions for the elasticfilesystem:CreateFileSystem action. File systems can be tagged +on creation. If tags are specified in the creation action, IAM performs additional +authorization on the elasticfilesystem:TagResource action to verify if users have +permissions to create tags. Therefore, you must grant explicit permissions to use the +elasticfilesystem:TagResource action. For more information, see Granting permissions to tag +resources during creation. # Arguments - `creation_token`: A string of up to 64 ASCII characters. Amazon EFS uses this to ensure diff --git a/src/services/guardduty.jl b/src/services/guardduty.jl index a555bbb3cf..e7c68acd7f 100644 --- a/src/services/guardduty.jl +++ b/src/services/guardduty.jl @@ -392,12 +392,17 @@ end Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated -member accounts either by invitation or through an organization. When using Create Members -as an organizations delegated administrator this action will enable GuardDuty in the added -member accounts, with the exception of the organization delegated administrator account, -which must enable GuardDuty prior to being added as a member. If you are adding accounts by -invitation, use this action after GuardDuty has bee enabled in potential member accounts -and before using InviteMembers. +member accounts either by invitation or through an organization. As a delegated +administrator, using CreateMembers will enable GuardDuty in the added member accounts, with +the exception of the organization delegated administrator account. A delegated +administrator must enable GuardDuty prior to being added as a member. If you are adding +accounts by invitation, before using InviteMembers, use CreateMembers after GuardDuty has +been enabled in potential member accounts. If you disassociate a member from a GuardDuty +delegated administrator, the member account details obtained from this API, including the +associated email addresses, will be retained. This is done so that the delegated +administrator can invoke the InviteMembers API without the need to invoke the CreateMembers +API again. To remove the details associated with a member account, the delegated +administrator must invoke the DeleteMembers API. 
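A sketch of a delegated administrator adding a member account with the call described above; the account ID, email address, and detector ID are placeholders, and the accountId/email field names follow the GuardDuty AccountDetail shape, so treat them as assumptions:

    create_members(
        [Dict("accountId" => "111122223333", "email" => "member@example.com")],
        "12abc34d567e8fa901bc2d34e56789f0",  # placeholder detector ID
    )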
# Arguments - `account_details`: A list of account ID and email address pairs of the accounts that you @@ -1084,7 +1089,12 @@ end disassociate_from_administrator_account(detector_id) disassociate_from_administrator_account(detector_id, params::Dict{String,<:Any}) -Disassociates the current GuardDuty member account from its administrator account. With +Disassociates the current GuardDuty member account from its administrator account. When you +disassociate an invited member from a GuardDuty delegated administrator, the member account +details obtained from the CreateMembers API, including the associated email addresses, are +retained. This is done so that the delegated administrator can invoke the InviteMembers API +without the need to invoke the CreateMembers API again. To remove the details associated +with a member account, the delegated administrator must invoke the DeleteMembers API. With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account. @@ -1120,7 +1130,12 @@ end disassociate_from_master_account(detector_id) disassociate_from_master_account(detector_id, params::Dict{String,<:Any}) -Disassociates the current GuardDuty member account from its administrator account. +Disassociates the current GuardDuty member account from its administrator account. When you +disassociate an invited member from a GuardDuty delegated administrator, the member account +details obtained from the CreateMembers API, including the associated email addresses, are +retained. This is done so that the delegated administrator can invoke the InviteMembers API +without the need to invoke the CreateMembers API again. To remove the details associated +with a member account, the delegated administrator must invoke the DeleteMembers API. # Arguments - `detector_id`: The unique ID of the detector of the GuardDuty member account. @@ -1154,10 +1169,16 @@ end disassociate_members(account_ids, detector_id) disassociate_members(account_ids, detector_id, params::Dict{String,<:Any}) -Disassociates GuardDuty member accounts (to the current administrator account) specified by -the account IDs. With autoEnableOrganizationMembers configuration for your organization set -to ALL, you'll receive an error if you attempt to disassociate a member account before -removing them from your Amazon Web Services organization. +Disassociates GuardDuty member accounts (from the current administrator account) specified +by the account IDs. When you disassociate an invited member from a GuardDuty delegated +administrator, the member account details obtained from the CreateMembers API, including +the associated email addresses, are retained. This is done so that the delegated +administrator can invoke the InviteMembers API without the need to invoke the CreateMembers +API again. To remove the details associated with a member account, the delegated +administrator must invoke the DeleteMembers API. With autoEnableOrganizationMembers +configuration for your organization set to ALL, you'll receive an error if you attempt to +disassociate a member account before removing them from your Amazon Web Services +organization. 
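For example (placeholder IDs; assumes the generated GuardDuty module is in scope):

    disassociate_members(["111122223333"], "12abc34d567e8fa901bc2d34e56789f0")
    # Member details from CreateMembers are retained so InviteMembers can be
    # called again later; call delete_members to remove them entirely.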
 # Arguments
 - `account_ids`: A list of account IDs of the GuardDuty member accounts that you want to
@@ -1831,10 +1852,21 @@ end
     invite_members(account_ids, detector_id)
     invite_members(account_ids, detector_id, params::Dict{String,<:Any})

-Invites other Amazon Web Services accounts (created as members of the current Amazon Web
-Services account by CreateMembers) to enable GuardDuty, and allow the current Amazon Web
-Services account to view and manage these accounts' findings on their behalf as the
-GuardDuty administrator account.
+Invites Amazon Web Services accounts to become members of an organization administered by
+the Amazon Web Services account that invokes this API. If you are using Amazon Web Services
+Organizations to manage your GuardDuty environment, this step is not needed. For more
+information, see Managing accounts with Amazon Web Services Organizations. To invite Amazon
+Web Services accounts, the first step is to ensure that GuardDuty has been enabled in the
+potential member accounts. You can now invoke this API to add accounts by invitation. The
+invited accounts can either accept or decline the invitation from their GuardDuty accounts.
+Each invited Amazon Web Services account can choose to accept the invitation from only one
+Amazon Web Services account. For more information, see Managing GuardDuty accounts by
+invitation. After the invite has been accepted and you choose to disassociate a member
+account (by using DisassociateMembers) from your account, the details of the member account
+obtained by invoking CreateMembers, including the associated email addresses, will be
+retained. This is done so that you can invoke InviteMembers without the need to invoke
+CreateMembers again. To remove the details associated with a member account, you must also
+invoke DeleteMembers.

 # Arguments
 - `account_ids`: A list of account IDs of the accounts that you want to invite to GuardDuty
diff --git a/src/services/iam.jl b/src/services/iam.jl
index 0118ff1d9e..82ab775101 100644
--- a/src/services/iam.jl
+++ b/src/services/iam.jl
@@ -5066,9 +5066,10 @@ end
 Lists the IAM roles that have the specified path prefix. If there are none, the operation
 returns an empty list. For more information about roles, see Working with roles. IAM
 resource-listing operations return a subset of the available attributes for the resource.
-For example, this operation does not return tags, even though they are an attribute of the
-returned object. To view all of the information for a role, see GetRole. You can paginate
-the results using the MaxItems and Marker parameters.
+This operation does not return the following attributes, even though they are an attribute
+of the returned object: PermissionsBoundary RoleLastUsed Tags To view all of the
+information for a role, see GetRole. You can paginate the results using the MaxItems and
+Marker parameters.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -5525,10 +5526,10 @@ end
 Lists the IAM users that have the specified path prefix. If no path prefix is specified,
 the operation returns all users in the Amazon Web Services account. If there are none, the
 operation returns an empty list. IAM resource-listing operations return a subset of the
-available attributes for the resource. For example, this operation does not return tags,
-even though they are an attribute of the returned object. To view all of the information
-for a user, see GetUser.
You can paginate the results using the MaxItems and Marker
-parameters.
+available attributes for the resource. This operation does not return the following
+attributes, even though they are an attribute of the returned object: PermissionsBoundary
+ Tags To view all of the information for a user, see GetUser. You can paginate the
+results using the MaxItems and Marker parameters.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
diff --git a/src/services/location.jl b/src/services/location.jl
index fb4a0ae3bc..d72b9307b9 100644
--- a/src/services/location.jl
+++ b/src/services/location.jl
@@ -269,26 +269,26 @@ end
     batch_update_device_position(tracker_name, updates)
     batch_update_device_position(tracker_name, updates, params::Dict{String,<:Any})

-Uploads position update data for one or more devices to a tracker resource. Amazon Location
-uses the data when it reports the last known device position and position history. Amazon
-Location retains location data for 30 days. Position updates are handled based on the
-PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased,
-updates are evaluated against linked geofence collections, and location data is stored at a
-maximum of one position per 30 second interval. If your update frequency is more often than
-every 30 seconds, only one update per 30 seconds is stored for each unique device ID. When
-PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated
-against linked geofence collections only if the device has moved more than 30 m (98.4 ft).
-When PositionFiltering is set to AccuracyBased filtering, location data is stored and
-evaluated against linked geofence collections only if the device has moved more than the
-measured accuracy. For example, if two consecutive updates from a device have a horizontal
-accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device
-has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon
-Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a
-DevicePositionUpdate.
+Uploads position update data for one or more devices to a tracker resource (up to 10
+devices per batch). Amazon Location uses the data when it reports the last known device
+position and position history. Amazon Location retains location data for 30 days. Position
+updates are handled based on the PositionFiltering property of the tracker. When
+PositionFiltering is set to TimeBased, updates are evaluated against linked geofence
+collections, and location data is stored at a maximum of one position per 30 second
+interval. If your update frequency is more often than every 30 seconds, only one update per
+30 seconds is stored for each unique device ID. When PositionFiltering is set to
+DistanceBased filtering, location data is stored and evaluated against linked geofence
+collections only if the device has moved more than 30 m (98.4 ft). When PositionFiltering
+is set to AccuracyBased filtering, location data is stored and evaluated against linked
+geofence collections only if the device has moved more than the measured accuracy. For
+example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10
+m, the second update is neither stored nor evaluated if the device has moved less than 15 m.
+If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default +value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate. # Arguments - `tracker_name`: The name of the tracker resource to update. -- `updates`: Contains the position update details for each device. +- `updates`: Contains the position update details for each device, up to 10 devices. """ function batch_update_device_position( @@ -2101,6 +2101,10 @@ existing geofence if a geofence ID is included in the request. polygon or a circle. Including both will return a validation error. Each geofence polygon can have a maximum of 1,000 vertices. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"GeofenceProperties"`: Specifies additional user-defined properties to store with the + Geofence. An array of key-value pairs. """ function put_geofence( CollectionName, GeofenceId, Geometry; aws_config::AbstractAWSConfig=global_aws_config() @@ -2222,6 +2226,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542. FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error. +- `"FilterCategories"`: A list of one or more Amazon Location categories to filter the + returned places. If you include more than one category, the results will include results + that match any of the categories listed. For more information about using categories, + including a list of Amazon Location categories, see Categories and filtering, in the Amazon + Location Service Developer Guide. - `"FilterCountries"`: An optional parameter that limits the search results by returning only suggestions within the provided list of countries. Use the ISO 3166 3-digit country code. For example, Australia uses three upper-case characters: AUS. @@ -2297,6 +2306,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542. FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error. +- `"FilterCategories"`: A list of one or more Amazon Location categories to filter the + returned places. If you include more than one category, the results will include results + that match any of the categories listed. For more information about using categories, + including a list of Amazon Location categories, see Categories and filtering, in the Amazon + Location Service Developer Guide. - `"FilterCountries"`: An optional parameter that limits the search results by returning only places that are in a specified list of countries. Valid values include ISO 3166 3-digit country codes. For example, Australia uses three upper-case characters: AUS. diff --git a/src/services/route_53_domains.jl b/src/services/route_53_domains.jl index 722d025da1..7db27313ed 100644 --- a/src/services/route_53_domains.jl +++ b/src/services/route_53_domains.jl @@ -878,24 +878,22 @@ end register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact) register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any}) -This operation registers a domain. 
Domains are registered either by Amazon Registrar (for
-.com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains).
-For some top-level domains (TLDs), this operation requires extra parameters. When you
-register a domain, Amazon Route 53 does the following: Creates a Route 53 hosted zone
-that has the same name as the domain. Route 53 assigns four name servers to your hosted
-zone and automatically updates your domain registration with the names of these name
-servers. Enables auto renew, so your domain registration will renew automatically each
-year. We'll notify you in advance of the renewal date so you can choose whether to renew
-the registration. Optionally enables privacy protection, so WHOIS queries return contact
-information either for Amazon Registrar (for .com, .net, and .org domains) or for our
-registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection,
-WHOIS queries return the information that you entered for the administrative, registrant,
-and technical contacts. You must specify the same privacy setting for the administrative,
-registrant, and technical contacts. If registration is successful, returns an operation
-ID that you can use to track the progress and completion of the action. If the request is
-not completed successfully, the domain registrant is notified by email. Charges your
-Amazon Web Services account an amount based on the top-level domain. For more information,
-see Amazon Route 53 Pricing.
+This operation registers a domain. For some top-level domains (TLDs), this operation
+requires extra parameters. When you register a domain, Amazon Route 53 does the following:
+ Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four
+name servers to your hosted zone and automatically updates your domain registration with
+the names of these name servers. Enables auto renew, so your domain registration will
+renew automatically each year. We'll notify you in advance of the renewal date so you can
+choose whether to renew the registration. Optionally enables privacy protection, so WHOIS
+queries return contact information for the registrar or the phrase \"REDACTED FOR
+PRIVACY\", or \"On behalf of <domain name> owner.\" If you don't enable privacy
+protection, WHOIS queries return the information that you entered for the administrative,
+registrant, and technical contacts. While some domains may allow different privacy settings
+per contact, we recommend specifying the same privacy setting for all contacts. If
+registration is successful, returns an operation ID that you can use to track the progress
+and completion of the action. If the request is not completed successfully, the domain
+registrant is notified by email. Charges your Amazon Web Services account an amount based
+on the top-level domain. For more information, see Amazon Route 53 Pricing.

 # Arguments
 - `admin_contact`: Provides detailed contact information. For information about the values
@@ -1199,17 +1197,15 @@ end
     transfer_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact)
     transfer_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any})

-Transfers a domain from another registrar to Amazon Route 53. When the transfer is
-complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org
-domains) or with our registrar associate, Gandi (for all other TLDs).
For more information -about transferring domains, see the following topics: For transfer requirements, a -detailed procedure, and information about viewing the status of a domain that you're -transferring to Route 53, see Transferring Registration for a Domain to Amazon Route 53 in -the Amazon Route 53 Developer Guide. For information about how to transfer a domain from -one Amazon Web Services account to another, see TransferDomainToAnotherAwsAccount. For -information about how to transfer a domain to another domain registrar, see Transferring a -Domain from Amazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide. -If the registrar for your domain is also the DNS service provider for the domain, we highly +Transfers a domain from another registrar to Amazon Route 53. For more information about +transferring domains, see the following topics: For transfer requirements, a detailed +procedure, and information about viewing the status of a domain that you're transferring to +Route 53, see Transferring Registration for a Domain to Amazon Route 53 in the Amazon Route +53 Developer Guide. For information about how to transfer a domain from one Amazon Web +Services account to another, see TransferDomainToAnotherAwsAccount. For information +about how to transfer a domain to another domain registrar, see Transferring a Domain from +Amazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide. If the +registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar @@ -1246,10 +1242,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Nameservers"`: Contains details for the host and glue IP addresses. - `"PrivacyProtectAdminContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information - either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar - associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the - information that you entered for the admin contact. You must specify the same privacy - setting for the administrative, registrant, and technical contacts. Default: true + for the registrar, the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain + name> owner.\". While some domains may allow different privacy settings per contact, we + recommend specifying the same privacy setting for all contacts. Default: true - `"PrivacyProtectRegistrantContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar @@ -1387,7 +1382,8 @@ domain registrant will be notified by email. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdminContact"`: Provides detailed contact information. -- `"Consent"`: Customer's consent for the owner change request. +- `"Consent"`: Customer's consent for the owner change request. Required if the domain is + not free (consent price is more than 0.00). - `"RegistrantContact"`: Provides detailed contact information. 
- `"TechContact"`: Provides detailed contact information. """ @@ -1421,11 +1417,11 @@ end update_domain_contact_privacy(domain_name, params::Dict{String,<:Any}) This operation updates the specified domain contact's privacy setting. When privacy -protection is enabled, contact information such as email address is replaced either with -contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact -information for our registrar associate, Gandi. You must specify the same privacy setting -for the administrative, registrant, and technical contacts. This operation affects only -the contact information for the specified contact type (administrative, registrant, or +protection is enabled, your contact information is replaced with contact information for +the registrar or with the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain +name> owner.\" While some domains may allow different privacy settings per contact, we +recommend specifying the same privacy setting for all contacts. This operation affects +only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email. By diff --git a/src/services/s3.jl b/src/services/s3.jl index 7336ea8c8d..dfe4507a12 100644 --- a/src/services/s3.jl +++ b/src/services/s3.jl @@ -226,17 +226,17 @@ Region that you specify for the destination object. For pricing information, see pricing. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Metadata When copying an -object, you can preserve all metadata (default) or specify new metadata. However, the ACL -is not preserved and is set to private for the user making the request. To override the -default ACL setting, specify a new ACL when generating a copy request. For more -information, see Using ACLs. To specify whether you want the object metadata copied from -the source object or replaced with metadata provided in the request, you can optionally add -the x-amz-metadata-directive header. When you grant permissions, you can use the -s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects -are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 -User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, -Resources, and Condition Keys for Amazon S3. x-amz-website-redirect-location is unique to -each object and must be specified in the request headers to copy the value. +object, you can preserve all metadata (the default) or specify new metadata. However, the +access control list (ACL) is not preserved and is set to private for the user making the +request. To override the default ACL setting, specify a new ACL when generating a copy +request. For more information, see Using ACLs. To specify whether you want the object +metadata copied from the source object or replaced with metadata provided in the request, +you can optionally add the x-amz-metadata-directive header. When you grant permissions, you +can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior +when objects are uploaded. 
For more information, see Specifying Conditions in a Policy in +the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see +Actions, Resources, and Condition Keys for Amazon S3. x-amz-website-redirect-location is +unique to each object and must be specified in the request headers to copy the value. x-amz-copy-source-if Headers To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters: x-amz-copy-source-if-match @@ -256,51 +256,53 @@ request, the encryption setting of the target object is set to the default encry configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses -server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a -customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a -customer-provided key to encrypt the target object copy. When you perform a CopyObject -operation, if you want to use a different type of encryption setting for the target object, -you can use other appropriate encryption-related headers to encrypt the target object with -a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side -encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and -decrypts the data when you access it. If the encryption setting in your request is -different from the default encryption configuration of the destination bucket, the -encryption setting in your request takes precedence. If the source object for the copy is -stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in -your request so that Amazon S3 can decrypt the object for copying. For more information +server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer +server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding +KMS key, or a customer-provided key to encrypt the target object copy. When you perform a +CopyObject operation, if you want to use a different type of encryption setting for the +target object, you can use other appropriate encryption-related headers to encrypt the +target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With +server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its +data centers and decrypts the data when you access it. If the encryption setting in your +request is different from the default encryption configuration of the destination bucket, +the encryption setting in your request takes precedence. If the source object for the copy +is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information +in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption. If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request Headers When copying an object, you can optionally use headers to grant ACL-based permissions. 
By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services -accounts or to predefined groups defined by Amazon S3. These permissions are then added to -the ACL on the object. For more information, see Access Control List (ACL) Overview and -Managing ACLs Using the REST API. If the bucket that you're copying objects to uses the -bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer -affect permissions. Buckets that use this setting only accept PUT requests that don't -specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the -bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML -format. For more information, see Controlling ownership of objects and disabling ACLs in -the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object -Ownership, all objects written to the bucket by any account will be owned by the bucket -owner. Checksums When copying an object, if it has a checksum, that checksum will be -copied to the new object by default. When you copy the object over, you may optionally +accounts or to predefined groups that are defined by Amazon S3. These permissions are then +added to the ACL on the object. For more information, see Access Control List (ACL) +Overview and Managing ACLs Using the REST API. If the bucket that you're copying objects +to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no +longer affect permissions. Buckets that use this setting only accept PUT requests that +don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as +the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the +XML format. For more information, see Controlling ownership of objects and disabling ACLs +in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for +Object Ownership, all objects written to the bucket by any account will be owned by the +bucket owner. Checksums When copying an object, if it has a checksum, that checksum will +be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header. Storage Class Options You can use the CopyObject action to change the storage class of an -object that is already stored in Amazon S3 using the StorageClass parameter. For more +object that is already stored in Amazon S3 by using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide. If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more -information, see Copying Objects. Versioning By default, x-amz-copy-source identifies the -current version of an object to copy. If the current version is a delete marker, Amazon S3 -behaves as if the object was deleted. To copy a different version, use the versionId -subresource. If you enable versioning on the target bucket, Amazon S3 generates a unique -version ID for the object being copied. This version ID is different from the version ID of -the source object. Amazon S3 returns the version ID of the copied object in the -x-amz-version-id response header in the response. 
If you do not enable versioning or
-suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
-The following operations are related to CopyObject: PutObject GetObject
+information, see Copying Objects. Versioning By default, the x-amz-copy-source header
+identifies the current version of an object to copy. If the current version is a delete
+marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use
+the versionId subresource. If you enable versioning on the target bucket, Amazon S3
+generates a unique version ID for the object being copied. This version ID is different
+from the version ID of the source object. Amazon S3 returns the version ID of the copied
+object in the x-amz-version-id response header in the response. If you do not enable
+versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is
+always null. The following operations are related to CopyObject: PutObject
+GetObject

# Arguments
- `bucket`: The name of the destination bucket. When using this action with an access
@@ -395,18 +397,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 object's Object Lock to expire.
- `"x-amz-request-payer"`:
- `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing
- this object in Amazon S3 (for example, AES256, aws:kms).
+ this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
-- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the Amazon Web Services KMS
- key ID to use for object encryption. All GET and PUT requests for an object protected by
- Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about
- configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web
- Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon
- S3 User Guide.
+- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the KMS key ID to use for
+ object encryption. All GET and PUT requests for an object protected by KMS will fail if
+ they're not made via SSL or using SigV4. For information about configuring any of the
+ officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying
+ the Signature Version in Request Authentication in the Amazon S3 User Guide.
- `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should
- use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS
- (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
- encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect
- bucket-level settings for S3 Bucket Key.
+ use an S3 Bucket Key for object encryption with server-side encryption using Key Management
+ Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3
+ Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action
+ doesn’t affect bucket-level settings for S3 Bucket Key.
- `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS
Encryption Context to use for object encryption. The value of this header is a
base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.
@@ -495,44 +496,36 @@ Region.
Accordingly, the signature calculations in Signature Version 4 must use us-east-1
as the Region, even if the location constraint in the request specifies another Region
where the bucket is to be created. If you create a bucket in a Region other than US East
(N. Virginia), your application must be able to handle 307 redirect. For more information,
-see Virtual hosting of buckets. Access control lists (ACLs) When creating a bucket using
-this operation, you can optionally configure the bucket ACL to specify the accounts or
-groups that should be granted specific permissions on the bucket. If your CreateBucket
-request sets bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that
+see Virtual hosting of buckets. Permissions In addition to s3:CreateBucket, the
+following permissions are required when your CreateBucket request includes specific
+headers: Access control lists (ACLs) - If your CreateBucket request specifies access
+control list (ACL) permissions and the ACL is public-read, public-read-write,
+authenticated-read, or if you specify access permissions explicitly through any other ACL,
+both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the
+CreateBucket request is private or if the request doesn't specify any ACLs, only
+s3:CreateBucket permission is needed. Object Lock - If ObjectLockEnabledForBucket is
+set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and
+s3:PutBucketVersioning permissions are required. S3 Object Ownership - If your
+CreateBucket request includes the x-amz-object-ownership header, then the
+s3:PutBucketOwnershipControls permission is required. By default, ObjectOwnership is set to
+BucketOwnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, except in
+uncommon use cases where you must control access for each object individually. If you want
+to change the ObjectOwnership setting, you can use the x-amz-object-ownership header in
+your CreateBucket request to set the ObjectOwnership setting of your choice. For more
+information about S3 Object Ownership, see Controlling object ownership in the Amazon S3
+User Guide. S3 Block Public Access - If your specific use case requires granting public
+access to your S3 resources, you can disable Block Public Access. You can create a new
+bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock
+API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. By
+default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent
+exposure of your resources, we recommend keeping the S3 Block Public Access settings
+enabled. For more information about S3 Block Public Access, see Blocking public access to
+your Amazon S3 storage in the Amazon S3 User Guide. If your CreateBucket request
+sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that
 provides access to an external Amazon Web Services account, your request fails with a 400
-error and returns the InvalidBucketAclWithObjectOwnership error code. For more information,
-see Controlling object ownership in the Amazon S3 User Guide. There are two ways to grant
-the appropriate permissions using the request headers. Specify a canned ACL using the
-x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned
-ACLs. Each canned ACL has a predefined set of grantees and permissions. For more
-information, see Canned ACL.
Specify access permissions explicitly using the
-x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and
-x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3
-supports in an ACL. For more information, see Access control list (ACL) overview. You
-specify each grantee as a type=value pair, where the type is one of the following: id
-– if the value specified is the canonical user ID of an Amazon Web Services account
-uri – if you are granting permissions to a predefined group emailAddress – if the
-value specified is the email address of an Amazon Web Services account Using email
-addresses to specify a grantee is only supported in the following Amazon Web Services
-Regions: US East (N. Virginia) US West (N. California) US West (Oregon) Asia
-Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
-South America (São Paulo) For a list of all the Amazon S3 supported Regions and
-endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For
-example, the following x-amz-grant-read header grants the Amazon Web Services accounts
-identified by account IDs permissions to read object data and its metadata:
-x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" You can use either a canned
-ACL or specify access permissions explicitly. You cannot do both. Permissions In
-addition to s3:CreateBucket, the following permissions are required when your CreateBucket
-includes specific headers: ACLs - If your CreateBucket request specifies ACL permissions
-and the ACL is public-read, public-read-write, authenticated-read, or if you specify access
-permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl
-permissions are needed. If the ACL the CreateBucket request is private or doesn't specify
-any ACLs, only s3:CreateBucket permission is needed. Object Lock - If
-ObjectLockEnabledForBucket is set to true in your CreateBucket request,
-s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.
-S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership
-header, s3:PutBucketOwnershipControls permission is required. The following operations
-are related to CreateBucket: PutObject DeleteBucket
+error and returns the InvalidBucketAclWithObjectOwnership error code. For more information,
+see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide. The
+following operations are related to CreateBucket: PutObject DeleteBucket

# Arguments
- `bucket`: The name of the bucket to create.

@@ -732,15 +725,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 this object in Amazon S3 (for example, AES256, aws:kms).
- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the ID of the symmetric
 encryption customer managed key to use for object encryption. All GET and PUT requests for
- an object protected by Amazon Web Services KMS will fail if not made via SSL or using
- SigV4. For information about configuring using any of the officially supported Amazon Web
- Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request
- Authentication in the Amazon S3 User Guide.
+ an object protected by KMS will fail if they're not made via SSL or using SigV4.
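Tying the CreateBucket permission requirements above to a concrete call, a hedged AWS.jl sketch (bucket name hypothetical; the extra permissions are needed only because of the headers supplied, and the "headers" nesting is the usual AWS.jl convention):

```julia
using AWS
@service S3

# Sketch: create a bucket with Object Lock enabled and an explicit
# ObjectOwnership setting. Beyond s3:CreateBucket, these headers require
# s3:PutBucketObjectLockConfiguration, s3:PutBucketVersioning, and
# s3:PutBucketOwnershipControls.
S3.create_bucket(
    "example-lock-bucket",
    Dict(
        "headers" => Dict(
            "x-amz-bucket-object-lock-enabled" => "true",
            "x-amz-object-ownership" => "BucketOwnerEnforced",
        ),
    ),
)
```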
For + information about configuring any of the officially supported Amazon Web Services SDKs and + Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in + the Amazon S3 User Guide. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with an object action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with an object action + doesn’t affect bucket-level settings for S3 Bucket Key. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. @@ -881,8 +874,8 @@ end Deletes the cors configuration information set for the bucket. To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others. For information about cors, -see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. The following -operations are related to DeleteBucketCors: PutBucketCors RESTOPTIONSobject +see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. Related Resources + PutBucketCors RESTOPTIONSobject # Arguments - `bucket`: Specifies the bucket whose cors configuration is being deleted. @@ -1657,6 +1650,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function get_bucket_accelerate_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -2662,58 +2656,60 @@ the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification. For more information about returning the ACL of an object, see GetObjectAcl. If the object you are retrieving is stored in the S3 Glacier -or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 -Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first -restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState -error. For information about restoring archived objects, see Restoring Archived Objects. -Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET -requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side -encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these -types of keys, you’ll get an HTTP 400 BadRequest error. 
If you encrypt an object by using -server-side encryption with customer-provided encryption keys (SSE-C) when you store the -object in Amazon S3, then when you GET the object, you must use the following headers: -x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key - x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see -Server-Side Encryption (Using Customer-Provided Encryption Keys). Assuming you have the -relevant permission to read object tags, the response also returns the x-amz-tagging-count -header that provides the count of number of tags associated with the object. You can use -GetObjectTagging to retrieve the tag set associated with an object. Permissions You need -the relevant read object (or version) permission for this operation. For more information, -see Specifying Permissions in a Policy. If the object you request does not exist, the error -Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you -have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code -404 (\"no such key\") error. If you don’t have the s3:ListBucket permission, Amazon S3 -will return an HTTP status code 403 (\"access denied\") error. Versioning By default, -the GET action returns the current version of an object. To return a different version, use -the versionId subresource. If you supply a versionId, you need the s3:GetObjectVersion -permission to access a specific version of an object. If you request a specific version, -you do not need to have the s3:GetObject permission. If you request the current version -without a specific version ID, only s3:GetObject permission is required. -s3:GetObjectVersion permission won't be required. If the current version of the object is -a delete marker, Amazon S3 behaves as if the object was deleted and includes -x-amz-delete-marker: true in the response. For more information about versioning, see -PutBucketVersioning. Overriding Response Header Values There are times when you want to -override certain response header values in a GET response. For example, you might override -the Content-Disposition response header value in your GET request. You can override values -for a set of response headers using the following query parameters. These response header -values are sent only on a successful request, that is, when status code 200 OK is returned. -The set of headers you can override using these parameters is a subset of the headers that -Amazon S3 accepts when you create an object. The response headers that you can override for -the GET response are Content-Type, Content-Language, Expires, Cache-Control, -Content-Disposition, and Content-Encoding. To override these header values in the GET -response, you use the following request parameters. You must sign the request, either -using an Authorization header or a presigned URL, when using these parameters. They cannot -be used with an unsigned (anonymous) request. response-content-type -response-content-language response-expires response-cache-control -response-content-disposition response-content-encoding Overriding Response Header -Values If both of the If-Match and If-Unmodified-Since headers are present in the request -as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition -evaluates to false; then, S3 returns 200 OK and the data requested. 
If both of the
-If-None-Match and If-Modified-Since headers are present in the request as follows:
-If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to
-true; then, S3 returns 304 Not Modified response code. For more information about
-conditional requests, see RFC 7232. The following operations are related to GetObject:
-ListBuckets GetObjectAcl
+Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering
+Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object
+you must first restore a copy using RestoreObject. Otherwise, this action returns an
+InvalidObjectState error. For information about restoring archived objects, see Restoring
+Archived Objects. Encryption request headers, like x-amz-server-side-encryption, should not
+be sent for GET requests if your object uses server-side encryption with Key Management
+Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services
+KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys
+(SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request
+error. If you encrypt an object by using server-side encryption with customer-provided
+encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the
+object, you must use the following headers:
+x-amz-server-side-encryption-customer-algorithm
+x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5
+ For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+Encryption Keys). Assuming you have the relevant permission to read object tags, the
+response also returns the x-amz-tagging-count header that provides the count of tags
+associated with the object. You can use GetObjectTagging to retrieve the tag set
+associated with an object. Permissions You need the relevant read object (or version)
+permission for this operation. For more information, see Specifying Permissions in a
+Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns
+depends on whether you also have the s3:ListBucket permission. If you have the
+s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not
+Found) error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+status code 403 (\"access denied\") error. Versioning By default, the GET action returns
+the current version of an object. To return a different version, use the versionId
+subresource. If you supply a versionId, you need the s3:GetObjectVersion permission to
+access a specific version of an object. If you request a specific version, you do not need
+to have the s3:GetObject permission. If you request the current version without a specific
+version ID, only s3:GetObject permission is required. s3:GetObjectVersion permission won't
+be required. If the current version of the object is a delete marker, Amazon S3 behaves
+as if the object was deleted and includes x-amz-delete-marker: true in the response. For
+more information about versioning, see PutBucketVersioning. Overriding Response Header
+Values There are times when you want to override certain response header values in a GET
+response. For example, you might override the Content-Disposition response header value in
+your GET request. You can override values for a set of response headers using the following
+query parameters.
These response header values are sent only on a successful request, that +is, when status code 200 OK is returned. The set of headers you can override using these +parameters is a subset of the headers that Amazon S3 accepts when you create an object. The +response headers that you can override for the GET response are Content-Type, +Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To +override these header values in the GET response, you use the following request parameters. + You must sign the request, either using an Authorization header or a presigned URL, when +using these parameters. They cannot be used with an unsigned (anonymous) request. +response-content-type response-content-language response-expires +response-cache-control response-content-disposition response-content-encoding +Overriding Response Header Values If both of the If-Match and If-Unmodified-Since headers +are present in the request as follows: If-Match condition evaluates to true, and; +If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data +requested. If both of the If-None-Match and If-Modified-Since headers are present in the +request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since +condition evaluates to true; then, S3 returns 304 Not Modified response code. For more +information about conditional requests, see RFC 7232. The following operations are +related to GetObject: ListBuckets GetObjectAcl # Arguments - `bucket`: The bucket name containing the object. When using this action with an access @@ -3339,30 +3335,31 @@ Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exception beyond these error codes. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following -headers: x-amz-server-side-encryption-customer-algorithm -x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 -For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +headers: x-amz-server-side-encryption-customer-algorithm +x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys). Encryption request headers, like x-amz-server-side-encryption, should -not be sent for GET requests if your object uses server-side encryption with KMS keys -(SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If -your object does use these types of keys, you’ll get an HTTP 400 BadRequest error. The -last modified property in this case is the creation date of the object. Request headers -are limited to 8 KB in size. For more information, see Common Request Headers. Consider the -following when using request headers: Consideration 1 – If both of the If-Match and -If-Unmodified-Since headers are present in the request as follows: If-Match condition -evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -S3 returns 200 OK and the data requested. Consideration 2 – If both of the -If-None-Match and If-Modified-Since headers are present in the request as follows: -If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates -to true; Then Amazon S3 returns the 304 Not Modified response code. 
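The response-header overrides and conditional headers documented for GetObject above combine as in this sketch. Bucket, key, and ETag are placeholders; query-style parameters sit at the top level of the params Dict, while header-style parameters are conventionally nested under "headers" in AWS.jl:

```julia
using AWS
@service S3

# Sketch: fetch an object, overriding two response headers on a 200 response
# and making the request conditional on the ETag.
resp = S3.get_object(
    "example-bucket",
    "docs/report.pdf",
    Dict(
        "response-content-type" => "application/pdf",
        "response-content-disposition" => "attachment; filename=\"report.pdf\"",
        "headers" => Dict("If-None-Match" => "\"EXAMPLE-ETAG\""),
    ),
)
```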
For more +not be sent for GET requests if your object uses server-side encryption with Key Management +Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services +KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys +(SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request +error. The last modified property in this case is the creation date of the object. +Request headers are limited to 8 KB in size. For more information, see Common Request +Headers. Consider the following when using request headers: Consideration 1 – If both +of the If-Match and If-Unmodified-Since headers are present in the request as follows: +If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to +false; Then Amazon S3 returns 200 OK and the data requested. Consideration 2 – If +both of the If-None-Match and If-Modified-Since headers are present in the request as +follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition +evaluates to true; Then Amazon S3 returns the 304 Not Modified response code. For more information about conditional requests, see RFC 7232. Permissions You need the relevant read object (or version) permission for this operation. For more information, see Actions, -resources, and condition keys for Amazon S3. If the object you request does not exist, the -error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If -you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code -404 (\"no such key\") error. If you don’t have the s3:ListBucket permission, Amazon S3 -returns an HTTP status code 403 (\"access denied\") error. The following actions are -related to HeadObject: GetObject GetObjectAttributes +resources, and condition keys for Amazon S3. If the object you request doesn't exist, the +error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. + If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status +code 404 error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an +HTTP status code 403 error. The following actions are related to HeadObject: +GetObject GetObjectAttributes # Arguments - `bucket`: The name of the bucket containing the object. When using this action with an @@ -3728,6 +3725,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function list_multipart_uploads(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3786,6 +3784,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function list_object_versions(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -4108,12 +4107,12 @@ longer affect permissions. You must use policies to grant access to your bucket objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. 
For more information, see Controlling object ownership in the Amazon S3 User Guide. -Permissions You can set access permissions using one of the following methods: Specify a -canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, -known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. -Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot -use other access control-specific headers in your request. For more information, see Canned -ACL. Specify access permissions explicitly with the x-amz-grant-read, +Permissions You can set access permissions by using one of the following methods: +Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of +predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and +permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, +you cannot use other access control-specific headers in your request. For more information, +see Canned ACL. Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these @@ -4367,18 +4366,20 @@ This action uses the encryption subresource to configure default encryption and Bucket Keys for an existing bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption -with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). If you -specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. -For information about bucket default encryption, see Amazon S3 bucket default encryption in -the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket -Keys in the Amazon S3 User Guide. This action requires Amazon Web Services Signature -Version 4. For more information, see Authenticating Requests (Amazon Web Services -Signature Version 4). To use this operation, you must have permissions to perform the -s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related -to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption +with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with +Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided +keys (SSE-C). If you specify default encryption by using SSE-KMS, you can also configure +Amazon S3 Bucket Keys. For information about bucket default encryption, see Amazon S3 +bucket default encryption in the Amazon S3 User Guide. For more information about S3 Bucket +Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. This action requires Amazon +Web Services Signature Version 4. For more information, see Authenticating Requests +(Amazon Web Services Signature Version 4). 
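As an illustration of the default-encryption configuration just described, a sketch that makes SSE-KMS the bucket default and enables S3 Bucket Keys. The key ARN and bucket name are placeholders, and whether AWS.jl accepts the configuration as a nested Dict or needs pre-serialized XML can vary by package version:

```julia
using AWS
@service S3

# Sketch: default the bucket to SSE-KMS with an S3 Bucket Key enabled.
config = Dict(
    "Rule" => Dict(
        "ApplyServerSideEncryptionByDefault" => Dict(
            "SSEAlgorithm" => "aws:kms",
            "KMSMasterKeyID" => "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
        ),
        "BucketKeyEnabled" => true,
    ),
)
S3.put_bucket_encryption("example-bucket", config)
```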
To use this operation, you must have +permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this +permission by default. The bucket owner can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The +following operations are related to PutBucketEncryption: GetBucketEncryption +DeleteBucketEncryption # Arguments - `bucket`: Specifies default encryption for a bucket using server-side encryption with @@ -4690,23 +4691,23 @@ backward compatibility. For the related API description, see PutBucketLifecycle. You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the -following: Filter identifying a subset of objects to which the rule applies. The filter -can be based on a key name prefix, object tags, or a combination of both. Status whether -the rule is in effect. One or more lifecycle transition and expiration actions that you -want Amazon S3 to perform on the objects identified by the filter. If the state of your -bucket is versioning-enabled or versioning-suspended, you can have many versions of the -same object (one current version and zero or more noncurrent versions). Amazon S3 provides -predefined actions that you can specify for current and noncurrent object versions. For -more information, see Object Lifecycle Management and Lifecycle Configuration Elements. -Permissions By default, all Amazon S3 resources are private, including buckets, objects, -and related subresources (for example, lifecycle configuration and website configuration). -Only the resource owner (that is, the Amazon Web Services account that created it) can -access the resource. The resource owner can optionally grant access permissions to others -by writing an access policy. For this operation, a user must get the -s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. Explicit -deny also supersedes any other permissions. If you want to block users or accounts from -removing or deleting objects from your bucket, you must deny them permissions for the -following actions: s3:DeleteObject s3:DeleteObjectVersion +following: A filter identifying a subset of objects to which the rule applies. The filter +can be based on a key name prefix, object tags, or a combination of both. A status +indicating whether the rule is in effect. One or more lifecycle transition and expiration +actions that you want Amazon S3 to perform on the objects identified by the filter. If the +state of your bucket is versioning-enabled or versioning-suspended, you can have many +versions of the same object (one current version and zero or more noncurrent versions). +Amazon S3 provides predefined actions that you can specify for current and noncurrent +object versions. For more information, see Object Lifecycle Management and Lifecycle +Configuration Elements. Permissions By default, all Amazon S3 resources are private, +including buckets, objects, and related subresources (for example, lifecycle configuration +and website configuration). Only the resource owner (that is, the Amazon Web Services +account that created it) can access the resource. 
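A sketch of one lifecycle rule with the three parts listed above (filter, status, actions); the permissions discussion continues below. Rule names and periods are placeholders, and the configuration is passed through the optional params Dict:

```julia
using AWS
@service S3

# Sketch: transition objects under logs/ to GLACIER after 30 days and
# expire them after 365 days.
lifecycle = Dict(
    "Rule" => [Dict(
        "ID" => "archive-then-expire",
        "Filter" => Dict("Prefix" => "logs/"),
        "Status" => "Enabled",
        "Transition" => Dict("Days" => 30, "StorageClass" => "GLACIER"),
        "Expiration" => Dict("Days" => 365),
    )],
)
S3.put_bucket_lifecycle_configuration(
    "example-bucket",
    Dict("LifecycleConfiguration" => lifecycle),
)
```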
The resource owner can optionally grant +access permissions to others by writing an access policy. For this operation, a user must +get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. +An explicit deny also supersedes any other permissions. If you want to block users or +accounts from removing or deleting objects from your bucket, you must deny them permissions +for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration @@ -4766,15 +4767,15 @@ log delivery uses the bucket owner enforced setting for S3 Object Ownership, you the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) to whom you're -assigning access rights (using request elements) in the following ways: By the person's -ID: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" +assigning access rights (by using request elements) in the following ways: By the +person's ID: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName>< -;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional +;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional and ignored in the request. By Email address: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<> ;</EmailAddress></Grantee> The grantee is resolved to the CanonicalUser and, -in a response to a GET Object acl request, appears as the CanonicalUser. By URI: +in a response to a GETObjectAcl request, appears as the CanonicalUser. By URI: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/Authenticate dUsers<></URI></Grantee> To enable logging, you use LoggingEnabled @@ -4853,10 +4854,10 @@ Subresource Operations and Managing Access Permissions to Your Amazon S3 Resourc information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch. The following operations are related to PutBucketMetricsConfiguration: DeleteBucketMetricsConfiguration GetBucketMetricsConfiguration -ListBucketMetricsConfigurations GetBucketLifecycle has the following special error: -Error code: TooManyConfigurations Description: You are attempting to create a new -configuration but have already reached the 1,000-configuration limit. HTTP Status Code: -HTTP 400 Bad Request +ListBucketMetricsConfigurations PutBucketMetricsConfiguration has the following special +error: Error code: TooManyConfigurations Description: You are attempting to create a +new configuration but have already reached the 1,000-configuration limit. HTTP Status +Code: HTTP 400 Bad Request # Arguments - `bucket`: The name of the bucket for which the metrics configuration is set. @@ -4986,11 +4987,11 @@ disable notifications by adding the empty NotificationConfiguration element. For information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference. 
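For the notification configuration being described here, a hedged sketch wiring one SNS topic to an event type (the topic ARN, account ID, and names are placeholders; the Dict-to-XML mapping depends on the AWS.jl version):

```julia
using AWS
@service S3

# Sketch: publish lost-object events for reduced-redundancy storage to SNS.
notification = Dict(
    "TopicConfiguration" => Dict(
        "Id" => "lost-object-alerts",
        "Topic" => "arn:aws:sns:us-east-1:111122223333:example-topic",
        "Event" => "s3:ReducedRedundancyLostObject",
    ),
)
S3.put_bucket_notification_configuration("example-bucket", notification)
```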
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can -use a bucket policy to grant permission to other users to set this configuration with -s3:PutBucketNotification permission. The PUT notification is an atomic operation. For -example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda -function configurations. When you send a PUT request with this configuration, Amazon S3 -sends test messages to your SNS topic. If the message fails, the entire PUT action will +use a bucket policy to grant permission to other users to set this configuration with the +required s3:PutBucketNotification permission. The PUT notification is an atomic operation. +For example, suppose your notification configuration includes SNS topic, SQS queue, and +Lambda function configurations. When you send a PUT request with this configuration, Amazon +S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket. If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the @@ -5577,24 +5578,24 @@ successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging in your IAM permissions. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the -Amazon S3 User Guide. You have three mutually exclusive options to protect data using +Amazon S3 User Guide. You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon -Web Services KMS keys (SSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts -data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You -can optionally tell Amazon S3 to encrypt data at by rest using server-side encryption with -other key options. For more information, see Using Server-Side Encryption. When adding a -new object, you can use headers to grant ACL-based permissions to individual Amazon Web -Services accounts or to predefined groups defined by Amazon S3. These permissions are then -added to the ACL on the object. By default, all objects are private. Only the owner has -full access control. For more information, see Access Control List (ACL) Overview and -Managing ACLs Using the REST API. If the bucket that you're uploading objects to uses the -bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer -affect permissions. Buckets that use this setting only accept PUT requests that don't -specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the -bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML -format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon -Web Services accounts) fail and return a 400 error with the error code +Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 +encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by +default. 
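In AWS.jl terms, choosing one of those encryption options at upload time looks roughly like this sketch (bucket, key, and key ARN are placeholders; recent AWS.jl versions take the payload under a "body" key):

```julia
using AWS
@service S3

# Sketch: upload a small object and request SSE-KMS explicitly; omit the
# encryption headers to fall back to the bucket default (SSE-S3 unless the
# bucket default was changed).
S3.put_object(
    "example-bucket",
    "data/sample.csv",
    Dict(
        "body" => "id,value\n1,42\n",
        "headers" => Dict(
            "x-amz-server-side-encryption" => "aws:kms",
            "x-amz-server-side-encryption-aws-kms-key-id" =>
                "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
        ),
    ),
)
```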
You can optionally tell Amazon S3 to encrypt data at rest by using server-side +encryption with other key options. For more information, see Using Server-Side Encryption. +When adding a new object, you can use headers to grant ACL-based permissions to individual +Amazon Web Services accounts or to predefined groups defined by Amazon S3. These +permissions are then added to the ACL on the object. By default, all objects are private. +Only the owner has full access control. For more information, see Access Control List (ACL) +Overview and Managing ACLs Using the REST API. If the bucket that you're uploading objects +to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no +longer affect permissions. Buckets that use this setting only accept PUT requests that +don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as +the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the +XML format. PUT requests that contain other ACLs (for example, custom grants to certain +Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account @@ -5695,19 +5696,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing - this object in Amazon S3 (for example, AES256, aws:kms). + this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). - `"x-amz-server-side-encryption-aws-kms-key-id"`: If x-amz-server-side-encryption has a - valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key - Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that - was used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not - provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services - managed key to protect the data. If the KMS key does not exist in the same account issuing - the command, you must use the full ARN and not just the ID. + valid value of aws:kms or aws:kms:dsse, this header specifies the ID of the Key Management + Service (KMS) symmetric encryption customer managed key that was used for the object. If + you specify x-amz-server-side-encryption:aws:kms or + x-amz-server-side-encryption:aws:kms:dsse, but do not provide + x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed + key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's + issuing the command, you must use the full ARN and not just the ID. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). 
Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with a PUT action + doesn’t affect bucket-level settings for S3 Bucket Key. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This @@ -6286,67 +6288,67 @@ operation, you must have permissions to perform the s3:RestoreObject action. The owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. -Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval or S3 -Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 -Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the -S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first -initiate a restore request, and then wait until a temporary copy of the object is -available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 -Standard storage class in your S3 bucket. To access an archived object, you must restore -the object for the duration (number of days) that you specify. For objects in the Archive -Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a -restore request, and then wait until the object is moved into the Frequent Access tier. To -restore a specific object version, you can provide a version ID. If you don't provide a -version ID, Amazon S3 restores the current version. When restoring an archived object, you -can specify one of the following data access tier options in the Tier element of the -request body: Expedited - Expedited retrievals allow you to quickly access your data -stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive -tier when occasional urgent requests for restoring archives are required. For all but the -largest archived objects (250 MB+), data accessed using Expedited retrievals is typically -made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity -for Expedited retrievals is available when you need it. Expedited retrievals and -provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive -storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard -retrievals allow you to access any of your archived objects within several hours. This is -the default option for retrieval requests that do not specify the retrieval option. -Standard retrievals typically finish within 3–5 hours for objects stored in the S3 -Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They -typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage -class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects -stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the -S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to -retrieve large amounts, even petabytes, of data at no cost. 
Bulk retrievals typically
-finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage
-class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost
-retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish
-within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3
-Intelligent-Tiering Deep Archive tier. For more information about archive retrieval
-options and provisioned capacity for Expedited data access, see Restoring Archived Objects
-in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the
-restore speed to a faster speed while it is in progress. For more information, see
-Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the
-status of object restoration, you can send a HEAD request. Operations return the
-x-amz-restore header, which provides information about the restoration status, in the
-response. You can use Amazon S3 event notifications to notify you when a restore is
-initiated or completed. For more information, see Configuring Amazon S3 Event Notifications
-in the Amazon S3 User Guide. After restoring an archived object, you can update the
-restoration period by reissuing the request with a new period. Amazon S3 updates the
-restoration period relative to the current time and charges only for the request-there are
-no data transfer charges. You cannot update the restoration period when Amazon S3 is
-actively processing your current restore request for the object. If your bucket has a
-lifecycle configuration with a rule that includes an expiration action, the object
-expiration overrides the life span that you specify in a restore request. For example, if
-you restore an object copy for 10 days, but the object is scheduled to expire in 3 days,
-Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration,
-see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User
-Guide. Responses A successful action returns either the 200 OK or 202 Accepted status
-code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in
-the response. If the object is previously restored, Amazon S3 returns 200 OK in the
-response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore
-is already in progress. (This error does not apply to SELECT type requests.) HTTP
-Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code:
-GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not
-available. Try again later. (Returned if there is insufficient capacity to process the
+Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval or S3
+Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3
+Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in
+the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must
+first initiate a restore request, and then wait until a temporary copy of the object is
+available. If you want a permanent copy of the object, create a copy of it in the Amazon S3
+Standard storage class in your S3 bucket. To access an archived object, you must restore
+the object for the duration (number of days) that you specify.
+For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering,
+you must first initiate a restore request, and then wait until the object is moved into
+the Frequent Access tier. To restore a specific object version, you can provide a version
+ID. If you don't provide a version ID, Amazon S3 restores the current version. When
+restoring an archived object, you can specify one of the following data access tier
+options in the Tier element of the request body: Expedited - Expedited retrievals allow
+you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class
+or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring
+archives are required. For all but the largest archived objects (250 MB+), data accessed
+using Expedited retrievals is typically made available within 1–5 minutes. Provisioned
+capacity ensures that retrieval capacity for Expedited retrievals is available when you
+need it. Expedited retrievals and provisioned capacity are not available for objects
+stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep
+Archive tier. Standard - Standard retrievals allow you to access any of your archived
+objects within several hours. This is the default option for retrieval requests that do
+not specify the retrieval option. Standard retrievals typically finish within 3–5 hours
+for objects stored in the S3 Glacier Flexible Retrieval storage class or S3
+Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects
+stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep
+Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
+ Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval
+and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even
+petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for
+objects stored in the S3 Glacier Flexible Retrieval storage class or S3
+Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval
+option when restoring objects from S3 Glacier Deep Archive. They typically finish within
+48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3
+Intelligent-Tiering Deep Archive tier. For more information about archive retrieval
+options and provisioned capacity for Expedited data access, see Restoring Archived
+Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to
+change the restore speed to a faster speed while it is in progress. For more information,
+see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the
+status of object restoration, you can send a HEAD request. Operations return the
+x-amz-restore header, which provides information about the restoration status, in the
+response. You can use Amazon S3 event notifications to notify you when a restore is
+initiated or completed. For more information, see Configuring Amazon S3 Event
+Notifications in the Amazon S3 User Guide. After restoring an archived object, you can
+update the restoration period by reissuing the request with a new period. Amazon S3
+updates the restoration period relative to the current time and charges only for the
+request; there are no data transfer charges.
+You cannot update the restoration period when Amazon S3 is actively processing your
+current restore request for the object. If your bucket has a lifecycle configuration with
+a rule that includes an expiration action, the object expiration overrides the life span
+that you specify in a restore request. For example, if you restore an object copy for 10
+days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3
+days. For more information about lifecycle configuration, see
+PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User
+Guide. Responses A successful action returns either the 200 OK or 202 Accepted status
+code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in
+the response. If the object is previously restored, Amazon S3 returns 200 OK in the
+response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is
+already in progress. (This error does not apply to SELECT type requests.) HTTP Status
+Code: 409 Conflict SOAP Fault Code Prefix: Client Code:
+GlacierExpeditedRetrievalNotAvailable Cause: Expedited retrievals are currently not
+available. Try again later. (Returned if there is insufficient capacity to process the
Expedited request. This error applies only to Expedited retrievals and not to S3 Standard
or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The
following operations are related to RestoreObject: PutBucketLifecycleConfiguration
@@ -6442,12 +6444,15 @@ information, see Appendix: SelectObjectContent Response. GetObject Support The
SelectObjectContent action does not support the following GetObject functionality. For more
information, see GetObject. Range: Although you can specify a scan range for an Amazon S3
Select request (see SelectObjectContentRequest - ScanRange in the request parameters),
-you cannot specify the range of bytes of an object to return. GLACIER, DEEP_ARCHIVE and
-REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or
-REDUCED_REDUNDANCY storage classes. For more information, about storage classes see Storage
-Classes in the Amazon S3 User Guide. Special Errors For a list of special errors for
-this operation, see List of SELECT Object Content Error Codes The following operations
-are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration
+you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE,
+and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS
+access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the
+GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the
+ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage
+class. For more information about storage classes, see Using Amazon S3 storage classes in
+the Amazon S3 User Guide.
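As an illustration of the RestoreObject flow described above, the following is a minimal sketch using the generated AWS.jl bindings. The bucket name, the key, and the shape of the `"RestoreRequest"` payload (mirroring the RestoreObject XML body) are assumptions for the example, not values taken from this patch:

```julia
using AWS
@service S3

# Initiate a restore of an archived object for 10 days using the Bulk tier.
# "Tier" may be "Expedited", "Standard", or "Bulk", as described above.
S3.restore_object(
    "my-archive-bucket",         # hypothetical bucket
    "logs/2023/archive.tar.gz",  # hypothetical key
    Dict{String,Any}(
        "RestoreRequest" => Dict(
            "Days" => 10,
            "GlacierJobParameters" => Dict("Tier" => "Bulk"),
        ),
    ),
)

# A HEAD request returns the x-amz-restore header, which reports whether the
# restore is still in progress and when the temporary copy expires.
S3.head_object("my-archive-bucket", "logs/2023/archive.tar.gz")
```

Per the response behavior above, a 202 Accepted status indicates the restore was newly initiated, while 200 OK indicates the object was already restored.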
+Special Errors For a list of special errors for this operation, see List of SELECT
+Object Content Error Codes The following operations are related to SelectObjectContent:
+GetObject GetBucketLifecycleConfiguration
 PutBucketLifecycleConfiguration

# Arguments
diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl
index 80526ac426..a8f93c3dd4 100644
--- a/src/services/sagemaker.jl
+++ b/src/services/sagemaker.jl
@@ -500,9 +500,13 @@ end
    create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn)
    create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn, params::Dict{String,<:Any})

-Creates an Autopilot job. Find the best-performing model after you run an Autopilot job by
-calling DescribeAutoMLJob. For information about how to use Autopilot, see Automate Model
-Development with Amazon SageMaker Autopilot.
+Creates an Autopilot job, also referred to as an Autopilot experiment or AutoML job. Find
+the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2
+(recommended) or DescribeAutoMLJob. CreateAutoMLJob only accepts tabular input data. We
+recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the
+same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for
+problem types such as image or text classification. Find guidelines about how to migrate
+CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

# Arguments
- `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account
@@ -519,9 +523,9 @@ Development with Amazon SageMaker Autopilot.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"AutoMLJobConfig"`: A collection of settings used to configure an AutoML job.
-- `"AutoMLJobObjective"`: Defines the objective metric used to measure the predictive
- quality of an AutoML job. You provide an AutoMLJobObjectiveMetricName and Autopilot infers
- whether to minimize or maximize it. For CreateAutoMLJobV2, only Accuracy is supported.
+- `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a
+ job. If not specified, the default objective metric depends on the problem type. See
+ AutoMLJobObjective for the default values.
- `"GenerateCandidateDefinitionsOnly"`: Generates possible candidates without training the
models. A candidate is a combination of data preprocessors, algorithms, and algorithm
parameter settings.
@@ -584,22 +588,28 @@ end
    create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn)
    create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn, params::Dict{String,<:Any})

-Creates an Amazon SageMaker AutoML job that uses non-tabular data such as images or text
-for Computer Vision or Natural Language Processing problems. Find the resulting model after
-you run an AutoML job V2 by calling DescribeAutoMLJobV2. To create an AutoMLJob using
-tabular data, see CreateAutoMLJob. This API action is callable through SageMaker Canvas
-only. Calling it directly from the CLI or an SDK results in an error.
+Creates an Autopilot job, also referred to as an Autopilot experiment or AutoML job V2.
+We recommend using CreateAutoMLJobV2 for all problem types.
+CreateAutoMLJobV2 can process the same tabular data as its previous version
+CreateAutoMLJob, as well as non-tabular data for problem types such as image or text
+classification. Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2
+in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem
+types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. Find the
+best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
+Calling DescribeAutoMLJob on an AutoML job V2 results in an error.

# Arguments
- `auto_mljob_input_data_config`: An array of channel objects describing the input data and
their location. Each channel is a named input source. Similar to InputDataConfig supported
- by CreateAutoMLJob. The supported formats depend on the problem type:
- ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile TextClassification:
- S3Prefix
+ by CreateAutoMLJob. The supported formats depend on the problem type: For tabular problem
+ types: S3Prefix, ManifestFile. For ImageClassification: S3Prefix, ManifestFile,
+ AugmentedManifestFile. For TextClassification: S3Prefix.
- `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account
and is case insensitive.
- `auto_mlproblem_type_config`: Defines the configuration settings of one of the supported
- problem types.
+ problem types. For tabular problem types, you must either specify the type of supervised
+ learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType) and provide the
+ AutoMLJobObjective, or specify neither.
- `output_data_config`: Provides information about encryption and the Amazon S3 output
path needed to store artifacts from an AutoML job.
- `role_arn`: The ARN of the role that is used to access the data.
@@ -607,13 +617,14 @@ only. Calling it directly from the CLI or an SDK results in an error.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a
- job. For CreateAutoMLJobV2, only Accuracy is supported.
+ job. If not specified, the default objective metric depends on the problem type. For the
+ list of default values per problem type, see AutoMLJobObjective. For tabular problem
+ types, you must either provide the AutoMLJobObjective and indicate the type of supervised
+ learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or provide
+ neither.
- `"DataSplitConfig"`: This structure specifies how to split the data into train and
- validation datasets. If you are using the V1 API (for example CreateAutoMLJob) or the V2
- API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a
- TextClassificationJobConfig problem type), the validation and training datasets must
- contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2
- GB in size.
+ validation datasets. The validation and training datasets must contain the same headers.
+ For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB
+ in size.
- `"ModelDeployConfig"`: Specifies how to generate the endpoint name for an automatic
one-click Autopilot model deployment.
- `"SecurityConfig"`: The security configuration for traffic encryption or Amazon VPC @@ -6257,7 +6268,7 @@ end describe_auto_mljob(auto_mljob_name) describe_auto_mljob(auto_mljob_name, params::Dict{String,<:Any}) -Returns information about an Amazon SageMaker AutoML job. +Returns information about an AutoML job created by calling CreateAutoMLJob. # Arguments - `auto_mljob_name`: Requests information about an AutoML job using its unique name. @@ -6292,12 +6303,10 @@ end describe_auto_mljob_v2(auto_mljob_name) describe_auto_mljob_v2(auto_mljob_name, params::Dict{String,<:Any}) -Returns information about an Amazon SageMaker AutoML V2 job. This API action is callable -through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an -error. +Returns information about an AutoML job V2 created by calling CreateAutoMLJobV2. # Arguments -- `auto_mljob_name`: Requests information about an AutoML V2 job using its unique name. +- `auto_mljob_name`: Requests information about an AutoML job V2 using its unique name. """ function describe_auto_mljob_v2(
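As a usage note for the describe calls above, a minimal sketch with the generated AWS.jl bindings follows; the service alias and the job name are assumptions for illustration:

```julia
using AWS
@service Sagemaker

# DescribeAutoMLJobV2 is the recommended describe call: per the docstrings above,
# it covers jobs created with either CreateAutoMLJob or CreateAutoMLJobV2, whereas
# DescribeAutoMLJob returns an error for an AutoML job V2.
job = Sagemaker.describe_auto_mljob_v2("my-automl-experiment")  # hypothetical job name
println(job["AutoMLJobStatus"])
```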