From a00b8b018fd294496a1fe6350011e43cfe09927c Mon Sep 17 00:00:00 2001
From: awstools
+ * If you provide an additional checksum value in your MultipartUpload requests and the
+ * object is encrypted with Key Management Service, you must have permission to use the
+ * kms:Decrypt action for the CompleteMultipartUpload request to succeed.
@@ -94,13 +99,9 @@ export interface CompleteMultipartUploadCommandOutput extends CompleteMultipartU
* Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see
* CreateSession
* .
- * If you provide an additional checksum value in your MultipartUpload requests and the
- * object is encrypted with Key Management Service, you must have permission to use the
- * kms:Decrypt action for the CompleteMultipartUpload request to succeed.
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
 * ReadOnly on the copy destination bucket.
*
*
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the * Amazon S3 User Guide.
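For illustration only, a minimal sketch of completing a multipart upload with the AWS SDK for JavaScript v3 (@aws-sdk/client-s3) while supplying per-part checksums; the bucket name, upload ID, ETags, and checksum values are placeholders, and the caller is assumed to have kms:Decrypt on the bucket's KMS key when the object is SSE-KMS encrypted.

import { S3Client, CompleteMultipartUploadCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

// Completes the upload; with checksums on an SSE-KMS object, kms:Decrypt is required.
await s3.send(
  new CompleteMultipartUploadCommand({
    Bucket: "amzn-s3-demo-bucket",
    Key: "large-object.bin",
    UploadId: "exampleUploadId", // returned by CreateMultipartUpload
    MultipartUpload: {
      Parts: [
        { PartNumber: 1, ETag: '"etag-part-1"', ChecksumCRC32: "crc-of-part-1" },
        { PartNumber: 2, ETag: '"etag-part-2"', ChecksumCRC32: "crc-of-part-2" },
      ],
    },
  }),
);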
diff --git a/clients/client-s3/src/commands/CreateMultipartUploadCommand.ts b/clients/client-s3/src/commands/CreateMultipartUploadCommand.ts
index cda4d6566107..532740654d77 100644
--- a/clients/client-s3/src/commands/CreateMultipartUploadCommand.ts
+++ b/clients/client-s3/src/commands/CreateMultipartUploadCommand.ts
@@ -216,7 +216,27 @@ export interface CreateMultipartUploadCommandOutput extends CreateMultipartUploa
- * Directory buckets -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession requests or PUT object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
+ * In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request.
+ * You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request.
+ * You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and
+ * Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.
+ *
+ * When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the
+ * CreateSession request. It's not supported to override the encryption settings values in the CreateSession request.
+ * So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy),
+ * the encryption request headers must match the default encryption configuration of the directory bucket.
+ *
+ *
+ * For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation,
+ * the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.
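As a hedged sketch of the behavior just described (directory bucket name and KMS key ARN are placeholders, and the values are assumed to match the destination bucket's default encryption configuration):

import { S3Client, CreateMultipartUploadCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

// Start a multipart upload whose encryption headers mirror the bucket default (SSE-KMS here).
const { UploadId } = await s3.send(
  new CreateMultipartUploadCommand({
    Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3",
    Key: "logs/2024/part-upload.bin",
    ServerSideEncryption: "aws:kms",
    SSEKMSKeyId: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",
  }),
);
console.log(UploadId);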
- * Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets.
- * For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see
+ * Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets.
+ * For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see
 * S3 Express One Zone APIs in the Amazon S3 User Guide.
 * To make Zonal endpoint API requests on a directory bucket, use the CreateSession
@@ -38,7 +43,7 @@ export interface CreateSessionCommandOutput extends CreateSessionOutput, __Metad
* bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the
* CreateSession
API request on the bucket, which returns temporary security
* credentials that include the access key ID, secret access key, session token, and
- * expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After
+ * expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After
* the session is created, you don’t need to use other policies to grant permissions to each
* Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by
* applying the temporary security credentials of the session to the request headers and
@@ -62,12 +67,12 @@ export interface CreateSessionCommandOutput extends CreateSessionOutput, __Metad
*
*
- * CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject.
+ * CopyObject API operation - Unlike other Zonal endpoint API operations, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject.
*
*
- * HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket.
+ * HeadBucket API operation - Unlike other Zonal endpoint API operations, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket.
- * To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession permission.
+ * To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission.
+ * If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key.
+ * For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession requests or PUT object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
+ * For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy,
+ * you authenticate and authorize requests through CreateSession for low latency.
+ * To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.
+ *
+ * Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+ * After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.
+ *
+ * In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API,
+ * you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request.
+ * You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and
+ * Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.
+ *
+ * When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the
+ * CreateSession request. It's not supported to override the encryption settings values in the CreateSession request.
+ * Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy),
+ * it's not supported to override the values of the encryption settings from the CreateSession request.
+ *
+ *
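For illustration, a minimal CreateSession sketch with @aws-sdk/client-s3; the directory bucket name is a placeholder, and in normal use the SDK creates and refreshes the session for you rather than you calling it directly.

import { S3Client, CreateSessionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

// Returns temporary credentials scoped to Zonal endpoint API operations on the bucket.
const { Credentials, ServerSideEncryption, SSEKMSKeyId } = await s3.send(
  new CreateSessionCommand({
    Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3",
    SessionMode: "ReadWrite",
  }),
);
console.log(Credentials?.Expiration, ServerSideEncryption, SSEKMSKeyId);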
- * This operation is not supported by directory buckets.
+ * This implementation of the DELETE action resets the default encryption for the bucket as
+ * server-side encryption with Amazon S3 managed keys (SSE-S3).
+ * General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket
+ * Default Encryption in the Amazon S3 User Guide.
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior
+ * for directory buckets.
- * This implementation of the DELETE action resets the default encryption for the bucket as
- * server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket
- * default encryption feature, see Amazon S3 Bucket Default Encryption
- * in the Amazon S3 User Guide.
- * To use this operation, you must have permissions to perform the
- * s3:PutEncryptionConfiguration action. The bucket owner has this permission
- * by default. The bucket owner can grant this permission to others. For more information
- * about permissions, see Permissions Related to Bucket Subresource Operations and Managing
- * Access Permissions to your Amazon S3 Resources in the
- * Amazon S3 User Guide.
+ * General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy.
+ * The bucket owner has this permission
+ * by default. The bucket owner can grant this permission to others. For more information
+ * about permissions, see Permissions Related to Bucket Operations and Managing
+ * Access Permissions to Your Amazon S3 Resources.
+ * Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
+ * Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
 * The following operations are related to DeleteBucketEncryption:
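A hedged usage sketch for DeleteBucketEncryption; the bucket name and account ID are placeholders, and for a directory bucket the caller is assumed to hold the s3express:PutEncryptionConfiguration identity-based permission.

import { S3Client, DeleteBucketEncryptionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

// Resets the bucket's default encryption back to SSE-S3.
await s3.send(
  new DeleteBucketEncryptionCommand({
    Bucket: "amzn-s3-demo-bucket",
    ExpectedBucketOwner: "111122223333",
  }),
);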
- * This operation is not supported by directory buckets.
- * Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets
+ * Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets
 * have a default encryption configuration that uses server-side encryption with Amazon S3 managed
- * keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket
- * Default Encryption in the Amazon S3 User Guide.
- * To use this operation, you must have permission to perform the
- * s3:GetEncryptionConfiguration action. The bucket owner has this permission
- * by default. The bucket owner can grant this permission to others. For more information
- * about permissions, see Permissions Related to Bucket Subresource Operations and Managing
- * Access Permissions to Your Amazon S3 Resources.
+ * General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket
+ * Default Encryption in the Amazon S3 User Guide.
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior
+ * for directory buckets.
+ * General purpose bucket permissions - The s3:GetEncryptionConfiguration permission is required in a policy.
+ * The bucket owner has this permission
+ * by default. The bucket owner can grant this permission to others. For more information
+ * about permissions, see Permissions Related to Bucket Operations and Managing
+ * Access Permissions to Your Amazon S3 Resources.
+ * Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
+ * Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
 * The following operations are related to GetBucketEncryption:
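For illustration, a minimal GetBucketEncryption sketch; the bucket name is a placeholder and the printed fields simply echo whatever default encryption rule the bucket reports.

import { S3Client, GetBucketEncryptionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

const { ServerSideEncryptionConfiguration } = await s3.send(
  new GetBucketEncryptionCommand({ Bucket: "amzn-s3-demo-bucket" }),
);
for (const rule of ServerSideEncryptionConfiguration?.Rules ?? []) {
  console.log(
    rule.ApplyServerSideEncryptionByDefault?.SSEAlgorithm,   // "AES256" or "aws:kms"
    rule.ApplyServerSideEncryptionByDefault?.KMSMasterKeyID, // set when SSE-KMS is configured
    rule.BucketKeyEnabled,
  );
}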
 * General purpose bucket permissions - To use
- * GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation with depend on whether the
+ * GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation depend on whether the
 * bucket is versioned. If the bucket is versioned, you need both the
 * s3:GetObjectVersion and s3:GetObjectVersionAttributes
 * permissions for this operation. If the bucket is not versioned, you need the
@@ -83,6 +83,9 @@ export interface GetObjectAttributesCommandOutput extends GetObjectAttributesOut
* Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see
* CreateSession
* .
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
- * Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+ * Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession requests or PUT object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
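A hedged GetObjectAttributes sketch; bucket and key are placeholders, and for an SSE-KMS object the caller is assumed to also hold kms:GenerateDataKey and kms:Decrypt on the key.

import { S3Client, GetObjectAttributesCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

const attrs = await s3.send(
  new GetObjectAttributesCommand({
    Bucket: "amzn-s3-demo-bucket",
    Key: "reports/q3.csv",
    ObjectAttributes: ["ETag", "Checksum", "ObjectSize", "StorageClass"],
  }),
);
console.log(attrs.ETag, attrs.ObjectSize, attrs.Checksum?.ChecksumSHA256);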
* CreateSession
* .
+ * If the object is encrypted using SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
 * GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS)
 * keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses
 * these types of keys, you’ll get an HTTP 400 Bad Request error.
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
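For illustration, a minimal GetObject sketch; bucket and key are placeholders, no SSE-C headers are sent, and decryption of SSE-S3/SSE-KMS objects happens server side.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

const object = await s3.send(
  new GetObjectCommand({
    Bucket: "amzn-s3-demo-bucket",
    Key: "reports/q3.csv",
  }),
);
const body = await object.Body?.transformToString();
console.log(object.ServerSideEncryption, body?.length);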
* *CreateSession
* .
+ * If you enable x-amz-checksum-mode in the request and the object is encrypted with
+ * Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
- * Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
- * This operation is not supported by directory buckets.
- * This action uses the encryption subresource to configure default encryption
+ * This operation configures default encryption
 * and Amazon S3 Bucket Keys for an existing bucket.
+ * Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported.
+ * For more information, see Regional and Zonal endpoints in the
+ * Amazon S3 User Guide.
 * By default, all buckets have a default encryption configuration that uses server-side
- * encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption
- * for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or
- * dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using
- * SSE-KMS, you can also configure Amazon S3 Bucket
- * Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
+ * encryption with Amazon S3 managed keys (SSE-S3).
+ * General purpose buckets
+ * You can optionally configure default encryption
+ * for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or
+ * dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS).
+ * If you specify default encryption by using
+ * SSE-KMS, you can also configure Amazon S3 Bucket
+ * Keys. For information about the bucket default
+ * encryption feature, see Amazon S3 Bucket Default Encryption
+ * in the Amazon S3 User Guide.
+ * If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.
+ * Directory buckets - You can optionally configure default encryption
+ * for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
+ * We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession requests or PUT object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
+ * Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
+ * Amazon Web Services managed key (aws/s3) isn't supported.
+ * S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or
+ * the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
+ * When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
+ * For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.
+ * If you're specifying a customer managed KMS key, we recommend using a fully qualified
 * KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the
@@ -48,12 +101,32 @@ export interface PutBucketEncryptionCommandOutput extends __MetadataBearer {}
Also, this action requires Amazon Web Services Signature Version 4. For more information, see * Authenticating Requests (Amazon Web Services Signature Version 4).
- * To use this operation, you must have permission to perform the
- * s3:PutEncryptionConfiguration action. The bucket owner has this permission
- * by default. The bucket owner can grant this permission to others. For more information
- * about permissions, see Permissions Related to Bucket Subresource Operations and Managing
- * Access Permissions to Your Amazon S3 Resources in the
- * Amazon S3 User Guide.
+ * General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy.
+ * The bucket owner has this permission
+ * by default. The bucket owner can grant this permission to others. For more information
+ * about permissions, see Permissions Related to Bucket Operations and Managing
+ * Access Permissions to Your Amazon S3 Resources in the
+ * Amazon S3 User Guide.
+ * Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
+ * To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key.
+ * Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
 * The following operations are related to PutBucketEncryption:
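A hedged sketch of setting SSE-KMS as a directory bucket's default encryption; the bucket name and customer managed key ARN are placeholders, and only a key ID or key ARN (not an alias) is assumed to be valid here.

import { S3Client, PutBucketEncryptionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

await s3.send(
  new PutBucketEncryptionCommand({
    Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3",
    ServerSideEncryptionConfiguration: {
      Rules: [
        {
          ApplyServerSideEncryptionByDefault: {
            SSEAlgorithm: "aws:kms",
            KMSMasterKeyID: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",
          },
          BucketKeyEnabled: true, // always enabled for directory buckets
        },
      ],
    },
  }),
);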
CreateSession
* .
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
CreateSession
* .
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
 * x-amz-server-side-encryption-customer-key-MD5
+ * For more information, see Using Server-Side
+ * Encryption in the Amazon S3 User Guide.
- * Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+ * Directory bucket - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms).
- * For more information, see Using Server-Side
- * Encryption in the Amazon S3 User Guide.
 * ReadOnly on the copy destination.
*
*
+ * If the object is encrypted with SSE-KMS, you must also have the
+ * kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
 * For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the
 * Amazon S3 User Guide.
@@ -160,7 +163,14 @@ export interface UploadPartCopyCommandOutput extends UploadPartCopyOutput, __Met
- * Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
+ * For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation,
+ * the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.
+ * S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
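For illustration, a minimal UploadPartCopy sketch; bucket names, key, range, and upload ID are placeholders, and encryption of the copied part is assumed to follow the destination bucket's default encryption configuration.

import { S3Client, UploadPartCopyCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

const { CopyPartResult } = await s3.send(
  new UploadPartCopyCommand({
    Bucket: "amzn-s3-demo-dest-bucket--usw2-az1--x-s3",
    Key: "archive/big-object.bin",
    UploadId: "exampleUploadId",
    PartNumber: 1,
    CopySource: "amzn-s3-demo-source-bucket/big-object.bin",
    CopySourceRange: "bytes=0-5242879",
  }),
);
console.log(CopyPartResult?.ETag);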
*The server-side encryption algorithm used when storing this object in Amazon S3 (for example,
* AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; @@ -511,9 +504,6 @@ export interface CompleteMultipartUploadOutput { /** *Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; @@ -893,9 +876,6 @@ export interface CopyObjectOutput { *If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The * value of this header is a base64-encoded UTF-8 string holding JSON with the encryption * context key-value pairs.
- *This functionality is not supported for directory buckets.
- *Indicates whether the copied object uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *The server-side encryption algorithm used when storing this object in Amazon S3 (for example,
- * AES256
, aws:kms
, aws:kms:dsse
). Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request
response.
The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request
response.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. * When copying an object, if you don't specify encryption information in your copy * request, the encryption setting of the target object is set to the default * encryption configuration of the destination bucket. By default, all buckets have a * base level of encryption configuration that uses server-side encryption with Amazon S3 - * managed keys (SSE-S3). If the destination bucket has a default encryption - * configuration that uses server-side encryption with Key Management Service (KMS) keys - * (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or - * server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses - * the corresponding KMS key, or a customer-provided key to encrypt the target + * managed keys (SSE-S3). If the destination bucket has a different default encryption + * configuration, Amazon S3 uses + * the corresponding encryption key to encrypt the target * object copy.
- * When you perform a CopyObject operation, if you want to use a
- * different type of encryption setting for the target object, you can specify
- * appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a
- * KMS key, or a customer-provided key. If the encryption setting in
- * your request is different from the default encryption configuration of the
- * destination bucket, the encryption setting in your request takes precedence.
With server-side * encryption, Amazon S3 encrypts your data as it writes your data to disks in its data * centers and decrypts the data when you access it. For more information about server-side encryption, see Using * Server-Side Encryption in the * Amazon S3 User Guide.
- * For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+ * General purpose buckets + *
+ *For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys + * (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and + * server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses + * the corresponding KMS key, or a customer-provided key to encrypt the target + * object copy.
+ * When you perform a CopyObject operation, if you want to use a
+ * different type of encryption setting for the target object, you can specify
+ * appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a
+ * KMS key, or a customer-provided key. If the encryption setting in
+ * your request is different from the default encryption configuration of the
+ * destination bucket, the encryption setting in your request takes precedence.
+ * Directory buckets + *
+ * For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession requests or PUT object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
+ * To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key).
+ * Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration.
+ * Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.
+ *
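A hedged CopyObject sketch for the directory-bucket case just described; bucket names and the key ARN are placeholders, and if encryption headers are supplied at all they are assumed to name the same customer managed key configured as the bucket's default SSE-KMS key.

import { S3Client, CopyObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

await s3.send(
  new CopyObjectCommand({
    CopySource: "amzn-s3-demo-source-bucket/reports/q3.csv",
    Bucket: "amzn-s3-demo-dest-bucket--usw2-az1--x-s3",
    Key: "reports/q3.csv",
    ServerSideEncryption: "aws:kms",
    SSEKMSKeyId: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",
  }),
);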
Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an + *
Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an * object protected by KMS will fail if they're not made via SSL or using SigV4. For * information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see * Specifying the * Signature Version in Request Authentication in the * Amazon S3 User Guide.
- *This functionality is not supported when the destination bucket is a directory bucket.
- *
+ * Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the
+ * x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS
+ * symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
+ * Amazon Web Services managed key (aws/s3) isn't supported.
+ *
- * Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of
+ * Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of
 * this header is a base64-encoded UTF-8 string holding JSON with the encryption context
- * key-value pairs. This value must be explicitly added to specify encryption context for
- * CopyObject requests.
- * This functionality is not supported when the destination bucket is a directory bucket.
+ * General purpose buckets - This value must be explicitly added to specify encryption context for
+ * CopyObject requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide.
+ * Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
* @public */ SSEKMSEncryptionContext?: string; @@ -1594,7 +1600,9 @@ export interface CopyObjectRequest { *For more information, see Amazon S3 Bucket Keys in the * Amazon S3 User Guide.
*This functionality is not supported when the destination bucket is a directory bucket.
+ *+ * Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
*The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; /** - *If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The - * value of this header is a base64-encoded UTF-8 string holding JSON with the encryption - * context key-value pairs.
- *This functionality is not supported for directory buckets.
- *If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of + * this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
* @public */ SSEKMSEncryptionContext?: string; @@ -2238,9 +2235,6 @@ export interface CreateMultipartUploadOutput { /** *Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession
requests or PUT
object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
+ *
+ * In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request.
+ * You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request.
+ * You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and
+ * Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.
+ *
+ * When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the
+ * CreateSession request. It's not supported to override the encryption settings values in the CreateSession request.
+ * So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy),
+ * the encryption request headers must match the default encryption configuration of the directory bucket.
+ *
+ *
- * Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption.
- * This functionality is not supported for directory buckets.
+ * Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same
+ * account that's issuing the command, you must use the full Key ARN not the Key ID.
+ *
+ * General purpose buckets - If you specify x-amz-server-side-encryption
with aws:kms
or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS
+ * key to use. If you specify
+ * x-amz-server-side-encryption:aws:kms
or
+ * x-amz-server-side-encryption:aws:kms:dsse
, but do not provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key
+ * (aws/s3
) to protect the data.
+ * Directory buckets - If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the
+ * x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS
+ * symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
+ * Amazon Web Services managed key (aws/s3
) isn't supported.
+ *
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of - * this header is a base64-encoded UTF-8 string holding JSON with the encryption context - * key-value pairs.
- *This functionality is not supported for directory buckets.
- *+ * Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
* @public */ SSEKMSEncryptionContext?: string; /** *Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with - * server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to + * server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+ *
+ * General purpose buckets - Setting this header to
* true
causes Amazon S3 to use an S3 Bucket Key for object encryption with
- * SSE-KMS.
Specifying this header with an object action doesn’t affect bucket-level settings for S3 + * SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 * Bucket Key.
- *This functionality is not supported for directory buckets.
- *
+ * Directory buckets - S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or
+ * the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
The established temporary security credentials of the session.
*- * Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint APIs on directory buckets.
+ * Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint API operations on directory buckets. *The server-side encryption algorithm used when you store objects in the directory bucket.
+ * @public + */ + ServerSideEncryption?: ServerSideEncryption; + + /** + *If you specify x-amz-server-side-encryption
with aws:kms
, this header indicates the ID of the KMS
+ * symmetric encryption customer managed key that was used for object encryption.
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of
+ * this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ * This value is stored as object metadata and automatically gets
+ * passed on to Amazon Web Services KMS for future GetObject
+ * operations on this object.
Indicates whether to use an S3 Bucket Key for server-side encryption + * with KMS keys (SSE-KMS).
+ * @public + */ + BucketKeyEnabled?: boolean; + /** *The established temporary security credentials for the created session.
* @public @@ -2910,9 +2964,9 @@ export interface CreateSessionRequest { /** *Specifies the mode of the session that will be created, either ReadWrite
or
* ReadOnly
. By default, a ReadWrite
session is created. A
- * ReadWrite
session is capable of executing all the Zonal endpoint APIs on a
+ * ReadWrite
session is capable of executing all the Zonal endpoint API operations on a
* directory bucket. A ReadOnly
session is constrained to execute the following
- * Zonal endpoint APIs: GetObject
, HeadObject
, ListObjectsV2
,
+ * Zonal endpoint API operations: GetObject
, HeadObject
, ListObjectsV2
,
* GetObjectAttributes
, ListParts
, and
* ListMultipartUploads
.
The server-side encryption algorithm to use when you store objects in the directory bucket.
+ *For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). By default, Amazon S3 encrypts data with SSE-S3.
+ * For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the
+ * x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS
+ * symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same
+ * account that's issuing the command, you must use the full Key ARN not the Key ID.
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
+ * Amazon Web Services managed key (aws/s3
) isn't supported.
+ *
Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of
+ * this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ * This value is stored as object metadata and automatically gets passed on
+ * to Amazon Web Services KMS for future GetObject
operations on
+ * this object.
+ * General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
+ * Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
+ * @public + */ + SSEKMSEncryptionContext?: string; + + /** + *Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with + * server-side encryption using KMS keys (SSE-KMS).
+ *S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or
+ * the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
The name of the bucket containing the server-side encryption configuration to * delete.
+ *
+ * Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+ *
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format
+ * bucket_base_name--az_id--x-s3
(for example,
+ * DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
+ *
Note: To supply the Multi-region Access Point (MRAP) to Bucket, you need to install the "@aws-sdk/signature-v4-crt" package to your project dependencies. * For more information, please go to https://github.com/aws/aws-sdk-js-v3#known-issues
* @public @@ -3034,6 +3139,10 @@ export interface DeleteBucketEncryptionRequest { /** *The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code
+ * 501 Not Implemented
.
Describes the default server-side encryption to apply to new objects in the bucket. If a * PUT Object request doesn't specify any server-side encryption, this default encryption will - * be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates - * an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted - * with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more - * information, see PUT Bucket encryption in - * the Amazon S3 API Reference.
+ * be applied. For more + * information, see PutBucketEncryption. *If you're specifying a customer managed KMS key, we recommend using a fully qualified - * KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the - * requester’s account. This behavior can result in data that's encrypted with a KMS key - * that belongs to the requester, and not the bucket owner.
+ *
+ * General purpose buckets - If you don't specify a customer managed key at configuration, Amazon S3 automatically creates
+ * an Amazon Web Services KMS key (aws/s3
) in your Amazon Web Services account the first time that you add an object encrypted
+ * with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS.
+ * Directory buckets - Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
+ * Amazon Web Services managed key (aws/s3
) isn't supported.
+ *
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS.
+ *Server-side encryption algorithm to use for the default encryption.
+ *For directory buckets, there are only two supported values for server-side encryption: AES256
and aws:kms
.
Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default
- * encryption. This parameter is allowed if and only if SSEAlgorithm
is set to
- * aws:kms
or aws:kms:dsse
.
Amazon Web Services Key Management Service (KMS) customer managed key ID to use for the default + * encryption.
+ *
+ * General purpose buckets - This parameter is allowed if and only if SSEAlgorithm
is set to
+ * aws:kms
or aws:kms:dsse
.
+ * Directory buckets - This parameter is allowed if and only if SSEAlgorithm
is set to
+ * aws:kms
.
You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS * key.
*If you use a key ID, you can run into a LogDestination undeliverable error when creating - * a VPC flow log.
- *If you are using encryption with cross-account or Amazon Web Services service operations you must use + *
If you are using encryption with cross-account or Amazon Web Services service operations, you must use * a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
+ *+ * General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified + * KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the + * requester’s account. This behavior can result in data that's encrypted with a KMS key + * that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating + * a VPC flow log. + *
+ *+ * Directory buckets - When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
+ *Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service * Developer Guide.
@@ -6087,10 +6238,19 @@ export interface ServerSideEncryptionByDefault { /** *Specifies the default server-side encryption configuration.
*If you're specifying a customer managed KMS key, we recommend using a fully qualified - * KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the - * requester’s account. This behavior can result in data that's encrypted with a KMS key - * that belongs to the requester, and not the bucket owner.
+ *+ * General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified + * KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the + * requester’s account. This behavior can result in data that's encrypted with a KMS key + * that belongs to the requester, and not the bucket owner.
+ *+ * Directory buckets - When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
+ *Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS
* (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the
* BucketKeyEnabled
element to true
causes Amazon S3 to use an S3
- * Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the - * Amazon S3 User Guide.
+ * Bucket Key. + *+ * General purpose buckets - By default, S3 Bucket Key is not enabled. For more information, see Amazon S3 Bucket Keys in the + * Amazon S3 User Guide.
+ *
+ * Directory buckets - S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or
+ * the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
The name of the bucket from which the server-side encryption configuration is * retrieved.
+ *
+ * Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+ *
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format
+ * bucket_base_name--az_id--x-s3
(for example,
+ * DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
+ *
Note: To supply the Multi-region Access Point (MRAP) to Bucket, you need to install the "@aws-sdk/signature-v4-crt" package to your project dependencies. * For more information, please go to https://github.com/aws/aws-sdk-js-v3#known-issues
* @public @@ -6154,6 +6333,10 @@ export interface GetBucketEncryptionRequest { /** *The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code
+ * 501 Not Implemented
.
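A minimal sketch of reading a bucket's default encryption with GetBucketEncryptionCommand; the bucket name, Region, and account ID are placeholders, and the ExpectedBucketOwner line applies to general purpose buckets only:

```ts
import { S3Client, GetBucketEncryptionCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });

async function showDefaultEncryption(): Promise<void> {
  const { ServerSideEncryptionConfiguration } = await client.send(
    new GetBucketEncryptionCommand({
      Bucket: "amzn-s3-demo-bucket",
      // ExpectedBucketOwner: "111122223333", // general purpose buckets only;
      // directory buckets return 501 Not Implemented if this header is sent.
    }),
  );
  console.log(ServerSideEncryptionConfiguration?.Rules);
}
```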
Optional configuration to replicate existing source bucket objects. For more - * information, see Replicating Existing Objects in the Amazon S3 User Guide. + *
Optional configuration to replicate existing source bucket objects. *
+ *This parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide.
+ *Optional configuration to replicate existing source bucket objects. For more - * information, see Replicating Existing Objects in the Amazon S3 User Guide. + *
Optional configuration to replicate existing source bucket objects. *
+ *This parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide.
+ *The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
- * AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3.
* @public */ ServerSideEncryption?: ServerSideEncryption; @@ -9132,11 +9315,7 @@ export interface GetObjectOutput { SSECustomerKeyMD5?: string; /** - *If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; @@ -9144,9 +9323,6 @@ export interface GetObjectOutput { /** *Indicates whether the object uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *To retrieve the checksum, this mode must be enabled.
- *In addition, if you enable checksum mode and the object is uploaded with a + *
+ * General purpose buckets - In addition, if you enable checksum mode and the object is uploaded with a
* checksum
* and encrypted with a Key Management Service (KMS) key, you must have permission to use the
* kms:Decrypt
action to retrieve the checksum.
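A short sketch of the GetObject checksum behavior described above, with placeholder bucket and object names; retrieving the checksum of an SSE-KMS encrypted object also requires kms:Decrypt on the key:

```ts
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });

async function getWithChecksum(): Promise<void> {
  const object = await client.send(
    new GetObjectCommand({
      Bucket: "amzn-s3-demo-bucket",
      Key: "example-object",
      // Returns the stored checksum; for an SSE-KMS object the caller also
      // needs kms:Decrypt for the checksum to come back.
      ChecksumMode: "ENABLED",
    }),
  );
  console.log(object.ChecksumCRC32 ?? object.ChecksumSHA256);
  console.log(object.SSEKMSKeyId); // ID of the KMS key used, if any
}
```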
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; @@ -10888,9 +11058,6 @@ export interface HeadObjectOutput { /** *Indicates whether the object uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *To retrieve the checksum, this parameter must be enabled.
- *In addition, if you enable checksum mode and the object is uploaded with a + *
+ * General purpose buckets - If you enable checksum mode and the object is uploaded with a
* checksum
* and encrypted with a Key Management Service (KMS) key, you must have permission to use the
* kms:Decrypt
action to retrieve the checksum.
+ * Directory buckets - If you enable ChecksumMode
and the object is encrypted with
+ * Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the
+ * kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
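A similar sketch for HeadObject against a directory bucket (the bucket name is a placeholder in the bucket_base_name--az_id--x-s3 format); for an SSE-KMS encrypted object, the caller needs both kms:GenerateDataKey and kms:Decrypt to get the checksum back:

```ts
import { S3Client, HeadObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });

async function headWithChecksum(): Promise<void> {
  const head = await client.send(
    new HeadObjectCommand({
      // Placeholder directory bucket name (bucket_base_name--az_id--x-s3).
      Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3",
      Key: "example-object",
      ChecksumMode: "ENABLED",
    }),
  );
  // For SSE-KMS objects in a directory bucket, kms:GenerateDataKey and
  // kms:Decrypt are both required for the checksum to be returned.
  console.log(head.ChecksumCRC32, head.ServerSideEncryption, head.BucketKeyEnabled);
}
```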
Specifies default encryption for a bucket using server-side encryption with different - * key options. By default, all buckets have a default encryption configuration that uses - * server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure - * default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key - * (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default - * encryption feature, see Amazon S3 Bucket Default Encryption - * in the Amazon S3 User Guide.
+ * key options. + *
+ * Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+ *
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format
+ * bucket_base_name--az_id--x-s3
(for example,
+ * DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide
+ *
Note: To supply the Multi-Region Access Point (MRAP) to Bucket, you need to add the "@aws-sdk/signature-v4-crt" package to your project dependencies. * For more information, please go to https://github.com/aws/aws-sdk-js-v3#known-issues
* @public @@ -13544,6 +13717,9 @@ export interface PutBucketEncryptionRequest { *The base64-encoded 128-bit MD5 digest of the server-side encryption * configuration.
*For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
+ *This functionality is not supported for directory buckets.
+ *If you provide an individual checksum, Amazon S3 ignores any provided
* ChecksumAlgorithm
parameter.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code
+ * 501 Not Implemented
.
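A minimal sketch of setting the bucket default encryption with PutBucketEncryptionCommand; the bucket name, account ID, and key ARN are placeholders, and the SDK calculates the request checksum/MD5 automatically:

```ts
import { S3Client, PutBucketEncryptionCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });

async function setDefaultEncryption(): Promise<void> {
  // The SDK computes the required Content-MD5/checksum for this request,
  // so no ContentMD5 value is passed explicitly.
  await client.send(
    new PutBucketEncryptionCommand({
      Bucket: "amzn-s3-demo-bucket",
      ServerSideEncryptionConfiguration: {
        Rules: [
          {
            ApplyServerSideEncryptionByDefault: {
              SSEAlgorithm: "aws:kms",
              KMSMasterKeyID:
                "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
            },
            BucketKeyEnabled: true,
          },
        ],
      },
    }),
  );
}
```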
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
- * AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3.
* @public */ ServerSideEncryption?: ServerSideEncryption; @@ -575,25 +571,17 @@ export interface PutObjectOutput { SSECustomerKeyMD5?: string; /** - *If x-amz-server-side-encryption
has a valid value of aws:kms
- * or aws:kms:dsse
, this header indicates the ID of the Key Management Service (KMS)
- * symmetric encryption customer managed key that was used for the object.
This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; /** - *If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the encryption
- * context key-value pairs. This value is stored as object metadata and automatically gets
- * passed on to Amazon Web Services KMS for future GetObject
or CopyObject
+ *
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of
+ * this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ * This value is stored as object metadata and automatically gets
+ * passed on to Amazon Web Services KMS for future GetObject
* operations on this object.
This functionality is not supported for directory buckets.
- *Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
, aws:kms:dsse
).
- * General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in - * Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the - * encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or - * DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side - * encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to - * encrypt data at rest by using server-side encryption with other key options. For more - * information, see Using Server-Side - * Encryption in the Amazon S3 User Guide.
- *
- * Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) value is supported.
+ * General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in + * Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the + * encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + * DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side + * encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to + * encrypt data at rest by using server-side encryption with other key options. For more + * information, see Using Server-Side + * Encryption in the Amazon S3 User Guide.
+ *
+ * Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your
+ * CreateSession
requests or PUT
object requests. Then, new objects
+ * are automatically encrypted with the desired encryption settings. For more
+ * information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
+ *
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request.
+ * You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request.
+ * You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and
+ * Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket.
+ *
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the
+ * CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request.
+ * So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy),
+ * the encryption request headers must match the default encryption configuration of the directory bucket.
+ *
+ *
If x-amz-server-side-encryption
has a valid value of aws:kms
- * or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS)
- * symmetric encryption customer managed key that was used for the object. If you specify
- * x-amz-server-side-encryption:aws:kms
or
- * x-amz-server-side-encryption:aws:kms:dsse
, but do not provide
- * x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key
- * (aws/s3
) to protect the data. If the KMS key does not exist in the same
- * account that's issuing the command, you must use the full ARN and not just the ID.
This functionality is not supported for directory buckets.
- *Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same + * account that's issuing the command, you must use the full Key ARN not the Key ID.
+ *
+ * General purpose buckets - If you specify x-amz-server-side-encryption
with aws:kms
or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS
+ * key to use. If you specify
+ * x-amz-server-side-encryption:aws:kms
or
+ * x-amz-server-side-encryption:aws:kms:dsse
, but do not provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key
+ * (aws/s3
) to protect the data.
+ * Directory buckets - If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the
+ * x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS
+ * symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
+ * Amazon Web Services managed key (aws/s3
) isn't supported.
+ *
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of
- * this header is a base64-encoded UTF-8 string holding JSON with the encryption context
- * key-value pairs. This value is stored as object metadata and automatically gets passed on
- * to Amazon Web Services KMS for future GetObject
or CopyObject
operations on
- * this object. This value must be explicitly added during CopyObject
operations.
This functionality is not supported for directory buckets.
- *Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of
+ * this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ * This value is stored as object metadata and automatically gets passed on
+ * to Amazon Web Services KMS for future GetObject
operations on
+ * this object.
+ * General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
+ * Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
* @public */ SSEKMSEncryptionContext?: string; /** *Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with - * server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to + * server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+ *
+ * General purpose buckets - Setting this header to
* true
causes Amazon S3 to use an S3 Bucket Key for object encryption with
- * SSE-KMS.
Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 + * SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 * Bucket Key.
- *This functionality is not supported for directory buckets.
- *
+ * Directory buckets - S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ * to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or
+ * the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; @@ -2776,9 +2783,6 @@ export interface UploadPartOutput { /** *Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *The server-side encryption algorithm used when you store this object in Amazon S3 (for example,
* AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key - * that was used for the object.
- *This functionality is not supported for directory buckets.
- *If present, indicates the ID of the KMS key that was used for object encryption.
* @public */ SSEKMSKeyId?: string; @@ -3087,9 +3084,6 @@ export interface UploadPartCopyOutput { /** *Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption * with Key Management Service (KMS) keys (SSE-KMS).
- *This functionality is not supported for directory buckets.
- *Completes a multipart upload by assembling previously uploaded parts.
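For completeness, a small sketch showing the encryption fields now surfaced on an UploadPart response; the bucket and object names are placeholders, and the upload ID is assumed to come from an earlier CreateMultipartUpload call:

```ts
import { S3Client, UploadPartCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });

// uploadId is assumed to come from a prior CreateMultipartUpload call.
async function uploadOnePart(uploadId: string, body: Uint8Array) {
  const part = await client.send(
    new UploadPartCommand({
      Bucket: "amzn-s3-demo-bucket",
      Key: "example-object",
      UploadId: uploadId,
      PartNumber: 1,
      Body: body,
    }),
  );
  // Encryption details are reported back on the part response.
  console.log(part.ServerSideEncryption, part.SSEKMSKeyId, part.BucketKeyEnabled);
  return { ETag: part.ETag, PartNumber: 1 };
}
```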
\nYou first initiate the multipart upload and then upload all parts using the UploadPart\n operation or the UploadPartCopy\n operation. After successfully uploading all relevant parts of an upload, you call this\n CompleteMultipartUpload
operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts\n in ascending order by part number to create a new object. In the CompleteMultipartUpload \n request, you must provide the parts list and ensure that the parts list is complete.\n The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list,\n you must provide the PartNumber
value and the ETag
value that are returned after that part\n was uploaded.
The processing of a CompleteMultipartUpload request could take several minutes to\n finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK
response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK
response has been sent. This means that a 200 OK
response can\n contain either a success or an error. The error response might be embedded in the 200 OK
response. \n If you call this API operation directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throw an exception (or, for\n the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload
fails, applications should be prepared\n to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best\n Practices.
You can't use Content-Type: application/x-www-form-urlencoded
for the \n CompleteMultipartUpload requests. Also, if you don't provide a\n Content-Type
header, CompleteMultipartUpload
can still return a 200\n OK
response.
For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - For\n information about permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions in the Amazon S3 User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
If you provide an additional checksum\n value in your MultipartUpload
requests and the\n object is encrypted with Key Management Service, you must have permission to use the\n kms:Decrypt
action for the\n CompleteMultipartUpload
request to succeed.
Error Code: EntityTooSmall
\n
Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.
\nHTTP Status Code: 400 Bad Request
\nError Code: InvalidPart
\n
Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified ETag might not have\n matched the uploaded part's ETag.
\nHTTP Status Code: 400 Bad Request
\nError Code: InvalidPartOrder
\n
Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.
\nHTTP Status Code: 400 Bad Request
\nError Code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.
\nHTTP Status Code: 404 Not Found
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CompleteMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nCompletes a multipart upload by assembling previously uploaded parts.
\nYou first initiate the multipart upload and then upload all parts using the UploadPart\n operation or the UploadPartCopy\n operation. After successfully uploading all relevant parts of an upload, you call this\n CompleteMultipartUpload
operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts\n in ascending order by part number to create a new object. In the CompleteMultipartUpload \n request, you must provide the parts list and ensure that the parts list is complete.\n The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list,\n you must provide the PartNumber
value and the ETag
value that are returned after that part\n was uploaded.
The processing of a CompleteMultipartUpload request could take several minutes to\n finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK
response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK
response has been sent. This means that a 200 OK
response can\n contain either a success or an error. The error response might be embedded in the 200 OK
response. \n If you call this API operation directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throw an exception (or, for\n the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload
fails, applications should be prepared\n to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best\n Practices.
You can't use Content-Type: application/x-www-form-urlencoded
for the \n CompleteMultipartUpload requests. Also, if you don't provide a\n Content-Type
header, CompleteMultipartUpload
can still return a 200\n OK
response.
For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - For\n information about permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions in the Amazon S3 User Guide.
\nIf you provide an additional checksum\n value in your MultipartUpload
requests and the\n object is encrypted with Key Management Service, you must have permission to use the\n kms:Decrypt
action for the\n CompleteMultipartUpload
request to succeed.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Error Code: EntityTooSmall
\n
Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.
\nHTTP Status Code: 400 Bad Request
\nError Code: InvalidPart
\n
Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified ETag might not have\n matched the uploaded part's ETag.
\nHTTP Status Code: 400 Bad Request
\nError Code: InvalidPartOrder
\n
Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.
\nHTTP Status Code: 400 Bad Request
\nError Code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.
\nHTTP Status Code: 404 Not Found
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CompleteMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThe server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
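A minimal end-to-end sketch of the multipart flow this operation completes, with placeholder bucket and object names; note the kms:Decrypt requirement when parts carry additional checksums and the object is SSE-KMS encrypted:

```ts
import {
  S3Client,
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });
const Bucket = "amzn-s3-demo-bucket";
const Key = "example-object";

// Every part except the last must be at least 5 MB.
async function multipartUpload(parts: Uint8Array[]) {
  const { UploadId } = await client.send(
    new CreateMultipartUploadCommand({ Bucket, Key }),
  );

  const completedParts: { ETag?: string; PartNumber: number }[] = [];
  for (let i = 0; i < parts.length; i++) {
    const { ETag } = await client.send(
      new UploadPartCommand({
        Bucket,
        Key,
        UploadId,
        PartNumber: i + 1,
        Body: parts[i],
      }),
    );
    completedParts.push({ ETag, PartNumber: i + 1 });
  }

  // If the parts carry additional checksums and the object is SSE-KMS
  // encrypted, the caller also needs kms:Decrypt for this call to succeed.
  return client.send(
    new CompleteMultipartUploadCommand({
      Bucket,
      Key,
      UploadId,
      // Parts must be listed in ascending order by part number.
      MultipartUpload: { Parts: completedParts },
    }),
  );
}
```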
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -18659,7 +18659,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nYou can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.
\nAmazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
\nBoth the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\nYou must have\n read access to the source object and write\n access to the destination bucket.
\n\n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have\n \n s3:GetObject
\n \n permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject
\n \n permission to write the object copy to the destination bucket.
\n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession
\n permission in\n the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the \n s3express:CreateSession
\n permission in the\n Action
element of a policy to write the object\n to the destination. The s3express:SessionMode
condition\n key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
\nWhen the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length
. You always need to read the entire response body\n to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied\n object.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.
\nIf the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK
response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK
status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nYou can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.
\nAmazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
\nBoth the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\nYou must have\n read access to the source object and write\n access to the destination bucket.
\n\n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have\n \n s3:GetObject
\n \n permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject
\n \n permission to write the object copy to the destination bucket.
\n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject
operation.
If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession
\n permission in\n the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the \n s3express:CreateSession
\n permission in the\n Action
element of a policy to write the object\n to the destination. The s3express:SessionMode
condition\n key can't be set to ReadOnly
on the copy destination bucket.
If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
\nWhen the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length
. You always need to read the entire response body\n to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied\n object.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.
\nIf the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK
response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK
status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -18945,7 +18945,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
). Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request
response.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a default encryption\n configuration that uses server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.
\nWhen you perform a CopyObject
operation, if you want to use a\n different type of encryption setting for the target object, you can specify \n appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a\n KMS key, or a customer-provided key. If the encryption setting in\n your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence.
With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. For more information about server-side encryption, see Using\n Server-Side Encryption in the\n Amazon S3 User Guide.
\nFor directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request
response.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a different default encryption\n configuration, Amazon S3 uses\n the corresponding encryption key to encrypt the target\n object copy.
\nWith server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. For more information about server-side encryption, see Using\n Server-Side Encryption in the\n Amazon S3 User Guide.
\n\n General purpose buckets \n
\nFor general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and \n server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.
\nWhen you perform a CopyObject
operation, if you want to use a\n different type of encryption setting for the target object, you can specify \n appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a\n KMS key, or a customer-provided key. If the encryption setting in\n your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence.
\n Directory buckets \n
\nFor directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). \n Amazon Web Services managed key (aws/s3
) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. \n Then, when you perform a CopyObject
operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration. \n
Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.
\nThis functionality is not supported when the destination bucket is a directory bucket.
\nSpecifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.
\n\n Directory buckets - If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3
) isn't supported. \n
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value must be explicitly added to specify encryption context for \n CopyObject
requests.
This functionality is not supported when the destination bucket is a directory bucket.
\nSpecifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs.
\n\n General purpose buckets - This value must be explicitly added to specify encryption context for \n CopyObject
requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide.
\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object.
\nSetting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3\n Bucket Key.
For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\nThis functionality is not supported when the destination bucket is a directory bucket.
\nSpecifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object.
\nSetting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3\n Bucket Key.
For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\n\n Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
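A hedged sketch of a copy that sets SSE-KMS on the destination object, with placeholder bucket names, key ARN, and encryption context:

```ts
import { Buffer } from "node:buffer";
import { S3Client, CopyObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-west-2" });

async function copyWithSseKms(): Promise<void> {
  await client.send(
    new CopyObjectCommand({
      CopySource: "amzn-s3-demo-source-bucket/source-object",
      Bucket: "amzn-s3-demo-destination-bucket",
      Key: "destination-object",
      ServerSideEncryption: "aws:kms",
      // Key ID or key ARN for the destination. For a directory bucket
      // destination this must be the customer managed key already configured
      // as the bucket's default SSE-KMS key.
      SSEKMSKeyId:
        "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
      // Additional encryption context is not copied from the source object;
      // re-specify it here if the destination copy needs one.
      SSEKMSEncryptionContext: Buffer.from(
        JSON.stringify({ project: "example" }),
      ).toString("base64"),
      // Honored for general purpose destination buckets; not supported for
      // copies that involve directory buckets.
      BucketKeyEnabled: true,
    }),
  );
}
```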
\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.
\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart \n upload must be completed within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.
\n\n Directory buckets - S3 Lifecycle is not supported by directory buckets.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
\n\n General purpose bucket permissions - To\n perform a multipart upload with encryption using an Key Management Service (KMS)\n KMS key, the requester must have permission to the\n kms:Decrypt
and kms:GenerateDataKey
actions on\n the key. The requester must also have permissions for the\n kms:GenerateDataKey
action for the\n CreateMultipartUpload
API. Then, the requester needs\n permissions for the kms:Decrypt
action on the\n UploadPart
and UploadPartCopy
APIs. These\n permissions are required because Amazon S3 must decrypt and read data from the\n encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API and permissions and Protecting data\n using server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload
request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.
\n x-amz-server-side-encryption
\n
\n x-amz-server-side-encryption-aws-kms-key-id
\n
\n x-amz-server-side-encryption-context
\n
If you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to\n protect the data.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key\n policy and your IAM user or role.
\nAll GET
and PUT
requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
\nUse customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C) in the Amazon S3 User Guide.
\n\n Directory buckets -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.
\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart \n upload must be completed within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.
\n\n Directory buckets - S3 Lifecycle is not supported by directory buckets.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
\n\n General purpose bucket permissions - To\n perform a multipart upload with encryption using an Key Management Service (KMS)\n KMS key, the requester must have permission to the\n kms:Decrypt
and kms:GenerateDataKey
actions on\n the key. The requester must also have permissions for the\n kms:GenerateDataKey
action for the\n CreateMultipartUpload
API. Then, the requester needs\n permissions for the kms:Decrypt
action on the\n UploadPart
and UploadPartCopy
APIs. These\n permissions are required because Amazon S3 must decrypt and read data from the\n encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API and permissions and Protecting data\n using server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html\">\n <code>CreateSession</code>
\n .
\n General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload
request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.
\n x-amz-server-side-encryption
\n
\n x-amz-server-side-encryption-aws-kms-key-id
\n
\n x-amz-server-side-encryption-context
\n
If you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to\n protect the data.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key\n policy and your IAM user or role.
\nAll GET
and PUT
requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
\nUse customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C) in the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. \n You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket. \n
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. \n So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n the encryption request headers must match the default encryption configuration of the directory bucket.\n\n
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, \n the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThe server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -19625,7 +19625,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. \n
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. \n You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket. \n
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. \n So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n the encryption request headers must match the default encryption configuration of the directory bucket.\n\n
Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption.
\nThis functionality is not supported for directory buckets.
\nSpecifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.
\n\n General purpose buckets - If you specify x-amz-server-side-encryption
with aws:kms
or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms
or\n x-amz-server-side-encryption:aws:kms:dsse
, but do not provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3
) to protect the data.
\n Directory buckets - If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3
) isn't supported. \n
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs.
\nThis functionality is not supported for directory buckets.
\nSpecifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
\n\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.
Specifying this header with an object action doesn’t affect bucket-level settings for S3\n Bucket Key.
\nThis functionality is not supported for directory buckets.
\nSpecifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
\n\n General purpose buckets - Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3\n Bucket Key.
\n Directory buckets - S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html\">CopyObject</a>, <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html\">UploadPartCopy</a>, <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops\">the Copy operation in Batch Operations</a>, or \n <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job\">the import jobs</a>. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.</p>
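The SSEKMSEncryptionContext member documented above carries a Base64-encoded UTF-8 JSON document of key-value pairs, which suggests the caller builds that string itself when the value is passed through as the raw header. A small sketch under that assumption, with placeholder context keys, bucket, key, and KMS key ARN:

import { S3Client, CreateMultipartUploadCommand } from "@aws-sdk/client-s3";

// Sketch only: encode an additional encryption context as Base64 JSON and pass it
// alongside the other SSE-KMS request members described above. Values are placeholders.
const encryptionContext = Buffer.from(
  JSON.stringify({ department: "finance" }),
  "utf-8"
).toString("base64");

const client = new S3Client({ region: "us-east-1" });

async function main() {
  await client.send(
    new CreateMultipartUploadCommand({
      Bucket: "amzn-s3-demo-bucket",
      Key: "finance/q1.parquet",
      ServerSideEncryption: "aws:kms",
      SSEKMSKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
      SSEKMSEncryptionContext: encryptionContext, // x-amz-server-side-encryption-context
      BucketKeyEnabled: true,                     // x-amz-server-side-encryption-bucket-key-enabled
    })
  );
}

main().catch(console.error);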
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. \n For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n
\nTo make Zonal endpoint API requests on a directory bucket, use the CreateSession
\n API operation. Specifically, you grant s3express:CreateSession
permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession
API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token
request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession
API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.
If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.
\nYou must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n \n CopyObject
API operation - Unlike other Zonal endpoint APIs, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
\n \n HeadBucket
API operation - Unlike other Zonal endpoint APIs, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession
permission to the bucket. In a\n policy, you can have the s3express:SessionMode
condition key to\n control who can create a ReadWrite
or ReadOnly
session.\n For more information about ReadWrite
or ReadOnly
\n sessions, see \n x-amz-create-session-mode
\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. \n For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n
\nTo make Zonal endpoint API requests on a directory bucket, use the CreateSession
\n API operation. Specifically, you grant s3express:CreateSession
permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession
API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token
request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession
API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.
If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.
\nYou must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n \n CopyObject
API operation - Unlike other Zonal endpoint API operations, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
\n \n HeadBucket
API operation - Unlike other Zonal endpoint API operations, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession
permission to the bucket. In a\n policy, you can have the s3express:SessionMode
condition key to\n control who can create a ReadWrite
or ReadOnly
session.\n For more information about ReadWrite
or ReadOnly
\n sessions, see \n x-amz-create-session-mode
\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, \nyou authenticate and authorize requests through CreateSession for low latency. \n To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.
\n\n Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3
) isn't supported. \n After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.\n
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, \n you can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) from the CreateSession
request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket. \n
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession
request. It's not supported to override the encryption settings values in the CreateSession
request. \n Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n it's not supported to override the values of the encryption settings from the CreateSession
request. \n\n
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The server-side encryption algorithm used when you store objects in the directory bucket.
", + "smithy.api#httpHeader": "x-amz-server-side-encryption" + } + }, + "SSEKMSKeyId": { + "target": "com.amazonaws.s3#SSEKMSKeyId", + "traits": { + "smithy.api#documentation": "If you specify x-amz-server-side-encryption
with aws:kms
, this header indicates the ID of the KMS \n symmetric encryption customer managed key that was used for object encryption.
If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject
\n operations on this object.
Indicates whether to use an S3 Bucket Key for server-side encryption\n with KMS keys (SSE-KMS).
", + "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" + } + }, "Credentials": { "target": "com.amazonaws.s3#SessionCredentials", "traits": { @@ -19787,7 +19815,7 @@ "SessionMode": { "target": "com.amazonaws.s3#SessionMode", "traits": { - "smithy.api#documentation": "Specifies the mode of the session that will be created, either ReadWrite
or\n ReadOnly
. By default, a ReadWrite
session is created. A\n ReadWrite
session is capable of executing all the Zonal endpoint APIs on a\n directory bucket. A ReadOnly
session is constrained to execute the following\n Zonal endpoint APIs: GetObject
, HeadObject
, ListObjectsV2
,\n GetObjectAttributes
, ListParts
, and\n ListMultipartUploads
.
Specifies the mode of the session that will be created, either ReadWrite
or\n ReadOnly
. By default, a ReadWrite
session is created. A\n ReadWrite
session is capable of executing all the Zonal endpoint API operations on a\n directory bucket. A ReadOnly
session is constrained to execute the following\n Zonal endpoint API operations: GetObject
, HeadObject
, ListObjectsV2
,\n GetObjectAttributes
, ListParts
, and\n ListMultipartUploads
.
The server-side encryption algorithm to use when you store objects in the directory bucket.
\nFor directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). By default, Amazon S3 encrypts data with SSE-S3. \n For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same\n account that't issuing the command, you must use the full Key ARN not the Key ID.
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3
) isn't supported. \n
Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject
operations on\n this object.
\n General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
", + "smithy.api#httpHeader": "x-amz-server-side-encryption-context" + } + }, + "BucketKeyEnabled": { + "target": "com.amazonaws.s3#BucketKeyEnabled", + "traits": { + "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using KMS keys (SSE-KMS).
\nS3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html\">CopyObject</a>, <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html\">UploadPartCopy</a>, <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops\">the Copy operation in Batch Operations</a>, or \n <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job\">the import jobs</a>. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.</p>
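Because the documentation above says the CLI and SDKs use the bucket's default encryption configuration for CreateSession and that overriding those values isn't supported there, a sketch that only inspects the encryption members this model update adds to the response is probably the safer illustration. Note that ServerSideEncryption, SSEKMSKeyId, and BucketKeyEnabled on the output may not exist in clients generated before this change; the bucket name is a placeholder.

import { S3Client, CreateSessionCommand } from "@aws-sdk/client-s3";

// Sketch only: read the encryption fields added to the CreateSession response by this
// model update. The request deliberately doesn't override encryption settings,
// matching the guidance above. The directory bucket name is a placeholder.
const client = new S3Client({ region: "us-west-2" });

async function main() {
  const response = await client.send(
    new CreateSessionCommand({ Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3" })
  );
  console.log(
    response.ServerSideEncryption, // e.g. "aws:kms" when the bucket default is SSE-KMS
    response.SSEKMSKeyId,
    response.BucketKeyEnabled
  );
}

main().catch(console.error);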
This operation is not supported by directory buckets.
\nThis implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket\n default encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to DeleteBucketEncryption
:
\n PutBucketEncryption\n
\n\n GetBucketEncryption\n
\nThis implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3).
\n\n General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior \n for directory buckets.
\n\n General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. \n The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to DeleteBucketEncryption
:
\n PutBucketEncryption\n
\n\n GetBucketEncryption\n
\nThe name of the bucket containing the server-side encryption configuration to\n delete.
", + "smithy.api#documentation": "The name of the bucket containing the server-side encryption configuration to\n delete.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3
(for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented
.
Optional configuration to replicate existing source bucket objects. For more\n information, see Replicating Existing Objects in the Amazon S3 User Guide.\n
" + "smithy.api#documentation": "Optional configuration to replicate existing source bucket objects. \n
\nThis parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide.
\nThis operation is not supported by directory buckets.
\nReturns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.
\nTo use this operation, you must have permission to perform the\n s3:GetEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
The following operations are related to GetBucketEncryption
:
\n PutBucketEncryption\n
\nReturns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3).
\n\n General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior \n for directory buckets.
\n\n General purpose bucket permissions - The s3:GetEncryptionConfiguration
permission is required in a policy. \n The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to GetBucketEncryption
:
\n PutBucketEncryption\n
\nThe name of the bucket from which the server-side encryption configuration is\n retrieved.
", + "smithy.api#documentation": "The name of the bucket from which the server-side encryption configuration is\n retrieved.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3
(for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented
.
Retrieves an object from Amazon S3.
\nIn the GetObject
request, specify the full key name for the object.
\n General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the object key name as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the object key name as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification in the Amazon S3 User Guide.
\n Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket--use1-az5--x-s3
, specify the object key name as /photos/2006/February/sample.jpg
. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject
, you must have the READ
\n access to the object (or version). If you grant READ
access to the anonymous user, the GetObject
operation \n returns the object without using an authorization header. For more information, see Specifying permissions in\n a policy in the Amazon S3 User Guide.
If you include a versionId
in your request header, you must have the\n s3:GetObjectVersion
permission to access a specific\n version of an object. The s3:GetObject
permission is not required in this scenario.
If you request the\n current version of an object without a specific versionId
in the request header, only\n the s3:GetObject
permission is required. The s3:GetObjectVersion
permission is not required in this scenario.\n
If the object that you request doesn’t exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
\n permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an\n HTTP status code 403 Access Denied
error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the \n S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the \n S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects in the Amazon S3 User Guide.
\n Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request
.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for the GetObject
requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
requests for the object that uses \n these types of keys, you’ll get an HTTP 400 Bad Request
error.
There are times when you want to override certain response header values of a\n GetObject
response. For example, you might override the\n Content-Disposition
response header value through your GetObject
\n request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK
is returned. \n The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. \n
The response headers that you can override for the\n GetObject
response are Cache-Control
, Content-Disposition
, \n Content-Encoding
, Content-Language
, Content-Type
, and Expires
.
To override values for a set of response headers in the\n GetObject
response, you can use the following query\n parameters in the request.
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
\n response-content-language
\n
\n response-content-type
\n
\n response-expires
\n
When you use these parameters, you must sign the request by using either an Authorization header or a\n presigned URL. These parameters cannot be used with an\n unsigned (anonymous) request.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves an object from Amazon S3.
\nIn the GetObject
request, specify the full key name for the object.
\n General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the object key name as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the object key name as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification in the Amazon S3 User Guide.
\n Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket--use1-az5--x-s3
, specify the object key name as /photos/2006/February/sample.jpg
. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject
, you must have READ</code>
\n access to the object (or version). If you grant READ
access to the anonymous user, the GetObject
operation \n returns the object without using an authorization header. For more information, see Specifying permissions in\n a policy in the Amazon S3 User Guide.
If you include a versionId
 in your request, you must have the\n <code>s3:GetObjectVersion</code>
permission to access a specific\n version of an object. The s3:GetObject
permission is not required in this scenario.
If you request the\n current version of an object without a specific versionId
 in the request, only\n the <code>s3:GetObject</code>
permission is required. The s3:GetObjectVersion
permission is not required in this scenario.\n
If the object that you request doesn’t exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
\n permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an\n HTTP status code 403 Access Denied
error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
 API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html\">\n <code>CreateSession</code>
\n .
If the object is encrypted using \n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the \n S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the \n S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects in the Amazon S3 User Guide.
\n Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request
.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for the GetObject
requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
requests for the object that uses \n these types of keys, you’ll get an HTTP 400 Bad Request
error.
\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
\nThere are times when you want to override certain response header values of a\n GetObject
response. For example, you might override the\n Content-Disposition
response header value through your GetObject
\n request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK
is returned. \n The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. \n
The response headers that you can override for the\n GetObject
response are Cache-Control
, Content-Disposition
, \n Content-Encoding
, Content-Language
, Content-Type
, and Expires
.
To override values for a set of response headers in the\n GetObject
response, you can use the following query\n parameters in the request.
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
\n response-content-language
\n
\n response-content-type
\n
\n response-expires
\n
When you use these parameters, you must sign the request by using either an Authorization header or a\n presigned URL. These parameters cannot be used with an\n unsigned (anonymous) request.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves all the metadata from an object without returning the object itself. This\n operation is useful if you're interested only in an object's metadata.
\n\n GetObjectAttributes
combines the functionality of HeadObject
\n and ListParts
. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes
.
\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To use\n GetObjectAttributes
, you must have READ access to the object. The permissions that you need to use this operation with depend on whether the\n bucket is versioned. If the bucket is versioned, you need both the\n s3:GetObjectVersion
and s3:GetObjectVersionAttributes
\n permissions for this operation. If the bucket is not versioned, you need the\n s3:GetObject
and s3:GetObjectAttributes
permissions.\n For more information, see Specifying Permissions in\n a Policy in the Amazon S3 User Guide. If the object\n that you request does not exist, the error Amazon S3 returns depends on whether you\n also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
(\"no such key\")\n error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
(\"access denied\")\n error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
Encryption request headers, like x-amz-server-side-encryption
,\n should not be sent for HEAD
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. \n If you include this header in a GET
request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request
error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
\n to the versionId
query parameter in the request.
Consider the following when using request headers:
\nIf both of the If-Match
and If-Unmodified-Since
headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK
and the data requested:
\n If-Match
condition evaluates to true
.
\n If-Unmodified-Since
condition evaluates to\n false
.
For more information about conditional requests, see RFC 7232.
\nIf both of the If-None-Match
and If-Modified-Since
\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified
:
\n If-None-Match
condition evaluates to false
.
\n If-Modified-Since
condition evaluates to\n true
.
For more information about conditional requests, see RFC 7232.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following actions are related to GetObjectAttributes
:
\n GetObject\n
\n\n GetObjectAcl\n
\n\n GetObjectLegalHold\n
\n\n GetObjectRetention\n
\n\n GetObjectTagging\n
\n\n HeadObject\n
\n\n ListParts\n
\nRetrieves all the metadata from an object without returning the object itself. This\n operation is useful if you're interested only in an object's metadata.
\n\n GetObjectAttributes
combines the functionality of HeadObject
\n and ListParts
. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes
.
\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To use\n GetObjectAttributes
, you must have READ access to the object. The permissions that you need to use this operation depend on whether the\n bucket is versioned. If the bucket is versioned, you need both the\n s3:GetObjectVersion
and s3:GetObjectVersionAttributes
\n permissions for this operation. If the bucket is not versioned, you need the\n s3:GetObject
and s3:GetObjectAttributes
permissions.\n For more information, see Specifying Permissions in\n a Policy in the Amazon S3 User Guide. If the object\n that you request does not exist, the error Amazon S3 returns depends on whether you\n also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
(\"no such key\")\n error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
(\"access denied\")\n error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
 API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html\">\n <code>CreateSession</code>
\n .
If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
Encryption request headers, like x-amz-server-side-encryption
,\n should not be sent for HEAD
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. \n If you include this header in a GET
request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request
 error. This is because the encryption method can't be changed when you retrieve the object.</p>
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
\n to the versionId
query parameter in the request.
Consider the following when using request headers:
\nIf both of the If-Match
and If-Unmodified-Since
headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK
and the data requested:
\n If-Match
condition evaluates to true
.
\n If-Unmodified-Since
condition evaluates to\n false
.
For more information about conditional requests, see RFC 7232.
\nIf both of the If-None-Match
and If-Modified-Since
\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified
:
\n If-None-Match
condition evaluates to false
.
\n If-Modified-Since
condition evaluates to\n true
.
For more information about conditional requests, see RFC 7232.
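For illustration only, a small sketch of the conditional-header behavior described above; it uses HeadObjectCommand, which exposes the same If-Match and If-Unmodified-Since inputs, and the bucket, key, and ETag values are placeholders.

```ts
import { S3Client, HeadObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({});

// If-Match evaluates to true (the ETag still matches) while
// If-Unmodified-Since evaluates to false (the object changed after the date),
// so Amazon S3 still returns 200 OK with the metadata, as described above.
const response = await client.send(
  new HeadObjectCommand({
    Bucket: "amzn-s3-demo-bucket",                 // placeholder
    Key: "photos/2006/February/sample.jpg",        // placeholder
    IfMatch: '"6805f2cfc46c0f04559748bb039d69ae"', // placeholder ETag
    IfUnmodifiedSince: new Date("2020-01-01T00:00:00Z"),
  })
);
console.log(response.LastModified, response.ContentLength);
```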
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following actions are related to GetObjectAttributes
:
\n GetObject\n
\n\n GetObjectAcl\n
\n\n GetObjectLegalHold\n
\n\n GetObjectRetention\n
\n\n GetObjectTagging\n
\n\n HeadObject\n
\n\n ListParts\n
\nThe server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3.
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -23877,14 +23933,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -24101,7 +24157,7 @@ "ChecksumMode": { "target": "com.amazonaws.s3#ChecksumMode", "traits": { - "smithy.api#documentation": "To retrieve the checksum, this mode must be enabled.
\nIn addition, if you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt
action to retrieve the checksum.
To retrieve the checksum, this mode must be enabled.
\n\n General purpose buckets - In addition, if you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt
action to retrieve the checksum.
The HEAD
operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not\n Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. \n It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\n\n General purpose bucket permissions - To\n use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.
If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
Encryption request headers, like x-amz-server-side-encryption
,\n should not be sent for HEAD
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD
request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request
error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.
\n\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
\n Directory buckets - Delete marker is not supported by directory buckets.
\n\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
\n to the versionId
query parameter in the request.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe HEAD
operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not\n Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. \n It's not possible to retrieve the exact exception behind these error codes.</p>
Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\n\n General purpose bucket permissions - To\n use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.
If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
 API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html\">\n <code>CreateSession</code>
\n .
If you enable x-amz-checksum-mode
in the request and the object is encrypted with\n Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
,\n should not be sent for HEAD
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD
request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request
 error. This is because the encryption method can't be changed when you retrieve the object.</p>
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.
\n\n Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
\nIf the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
\n Directory buckets - Delete marker is not supported by directory buckets.
\n\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
\n to the versionId
query parameter in the request.
\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -25151,7 +25207,7 @@ "ChecksumMode": { "target": "com.amazonaws.s3#ChecksumMode", "traits": { - "smithy.api#documentation": "To retrieve the checksum, this parameter must be enabled.
\nIn addition, if you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt
action to retrieve the checksum.
To retrieve the checksum, this parameter must be enabled.
\n\n General purpose buckets - If you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt
action to retrieve the checksum.
\n Directory buckets - If you enable ChecksumMode
and the object is encrypted with\n Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
This operation is not supported by directory buckets.
\nThis action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nAlso, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nThis operation configures default encryption \n and Amazon S3 Bucket Keys for an existing bucket.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n
. Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3).
\n\n General purpose buckets\n
\nYou can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). \n If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. For information about the bucket default\n encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.\n
\nIf you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.
\n\n Directory buckets - You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
\nWe recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3
) isn't supported. \n
S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or \n the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
\nFor directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nAlso, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\n\n General purpose bucket permissions - The s3:PutEncryptionConfiguration
permission is required in a policy. \n The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
\n Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration
permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey
and the kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the target KMS key.
\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nSpecifies default encryption for a bucket using server-side encryption with different\n key options. By default, all buckets have a default encryption configuration that uses\n server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure\n default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key\n (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default\n encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies default encryption for a bucket using server-side encryption with different\n key options.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n
. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3
(for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n
The base64-encoded 128-bit MD5 digest of the server-side encryption\n configuration.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
", + "smithy.api#documentation": "The base64-encoded 128-bit MD5 digest of the server-side encryption\n configuration.
\nFor requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
\nThis functionality is not supported for directory buckets.
\nIndicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum
or\n x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.
If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm
parameter.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum
or\n x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.
If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm
parameter.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented
.
Adds an object to a bucket.
\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject
to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All\n objects written to the bucket by any account will be owned by the bucket owner.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
\n\n S3 Object Lock - To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock in the Amazon S3 User Guide.
\nThis functionality is not supported for directory buckets.
\n\n S3 Versioning - When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID\n of that object being stored in Amazon S3. \n You can retrieve, replace, or delete any version of the object. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\nThis functionality is not supported for directory buckets.
\n\n General purpose bucket permissions - The following permissions are required in your policies when your \n PutObject
request includes specific headers.
\n \n s3:PutObject
\n - To successfully complete the PutObject
request, you must always have the s3:PutObject
permission on a bucket to add an object\n to it.
\n \n s3:PutObjectAcl
\n - To successfully change the objects ACL of your PutObject
request, you must have the s3:PutObjectAcl
.
\n \n s3:PutObjectTagging
\n - To successfully set the tag-set with your PutObject
request, you\n must have the s3:PutObjectTagging
.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose bucket - To ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, \n you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
\n Directory bucket - This functionality is not supported for directory buckets.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
For more information about related Amazon S3 APIs, see the following:
\n\n CopyObject\n
\n\n DeleteObject\n
\nAdds an object to a bucket.
\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject
to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All\n objects written to the bucket by any account will be owned by the bucket owner.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
\n\n S3 Object Lock - To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock in the Amazon S3 User Guide.
\nThis functionality is not supported for directory buckets.
\n\n S3 Versioning - When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID\n of that object being stored in Amazon S3. \n You can retrieve, replace, or delete any version of the object. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\nThis functionality is not supported for directory buckets.
\n\n General purpose bucket permissions - The following permissions are required in your policies when your \n PutObject
request includes specific headers.
\n \n s3:PutObject
\n - To successfully complete the PutObject
request, you must always have the s3:PutObject
permission on a bucket to add an object\n to it.
\n \n s3:PutObjectAcl
\n - To successfully change the objects ACL of your PutObject
request, you must have the s3:PutObjectAcl
.
\n \n s3:PutObjectTagging
\n - To successfully set the tag-set with your PutObject
request, you\n must have the s3:PutObjectTagging
.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
\n General purpose bucket - To ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, \n you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
\n Directory bucket - This functionality is not supported for directory buckets.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
For more information about related Amazon S3 APIs, see the following:
\n\n CopyObject\n
\n\n DeleteObject\n
\nThe server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3.
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -31225,21 +31281,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If x-amz-server-side-encryption
has a valid value of aws:kms
\n or aws:kms:dsse
, this header indicates the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object.
This functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject
or CopyObject
\n operations on this object.
This functionality is not supported for directory buckets.
\nIf present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject
\n operations on this object.
Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -31430,7 +31486,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
\n General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) value is supported.
The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
, aws:kms:dsse
).
\n General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession
requests or PUT
object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. \n
In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession
request. \n You can't override the values of the encryption settings (x-amz-server-side-encryption
, x-amz-server-side-encryption-aws-kms-key-id
, x-amz-server-side-encryption-context
, and x-amz-server-side-encryption-bucket-key-enabled
) that are specified in the CreateSession
request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession
request to protect new objects in the directory bucket. \n
When you use the CLI or the Amazon Web Services SDKs, for CreateSession
, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession
    request. You can't override the encryption settings values in the CreateSession
    
request. \n So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n the encryption request headers must match the default encryption configuration of the directory bucket.\n\n
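    To make the default-encryption guidance above concrete, here is a minimal sketch using the AWS SDK for JavaScript v3 client that this package generates; the bucket names and the key ARN are placeholders. A directory bucket write relies on the bucket's default encryption configuration, while a general purpose bucket write may request SSE-KMS explicitly.
    
    ```ts
    import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({ region: "us-west-2" });
    
    // Directory bucket (placeholder name): omit encryption headers and let the
    // bucket's default encryption configuration protect the new object.
    await client.send(
      new PutObjectCommand({
        Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3",
        Key: "logs/app.log",
        Body: "hello",
      })
    );
    
    // General purpose bucket (placeholder name): SSE-KMS can be requested per object.
    await client.send(
      new PutObjectCommand({
        Bucket: "amzn-s3-demo-bucket",
        Key: "logs/app.log",
        Body: "hello",
        ServerSideEncryption: "aws:kms",
        SSEKMSKeyId:
          "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", // placeholder
      })
    );
    ```
    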
If x-amz-server-side-encryption
has a valid value of aws:kms
\n or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms
or\n x-amz-server-side-encryption:aws:kms:dsse
, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3
) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.
This functionality is not supported for directory buckets.
\nSpecifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.
\n\n General purpose buckets - If you specify x-amz-server-side-encryption
with aws:kms
or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms
or\n x-amz-server-side-encryption:aws:kms:dsse
, but do not provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3
) to protect the data.
\n Directory buckets - If you specify x-amz-server-side-encryption
with aws:kms
, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id
header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request
    error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support one customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3
    
) isn't supported. \n
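    As a hedged illustration of how this header surfaces in the generated client, the sketch below starts a multipart upload with SSE-KMS and an explicit key ARN; the bucket name and key ARN are placeholders, and the ID or ARN form is used because directory buckets do not accept the alias format.
    
    ```ts
    import { S3Client, CreateMultipartUploadCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({});
    
    // Placeholder key ARN; an alias such as "alias/my-key" is not accepted for
    // directory buckets, so the key ID or key ARN form is the safer choice.
    const keyArn =
      "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab";
    
    const { UploadId } = await client.send(
      new CreateMultipartUploadCommand({
        Bucket: "amzn-s3-demo-bucket", // placeholder
        Key: "backups/archive.bin",
        ServerSideEncryption: "aws:kms",
        SSEKMSKeyId: keyArn,
      })
    );
    console.log(UploadId);
    ```
    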
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject
or CopyObject
operations on\n this object. This value must be explicitly added during CopyObject
operations.
This functionality is not supported for directory buckets.
\nSpecifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject
operations on\n this object.
\n General purpose buckets - This value must be explicitly added during CopyObject
operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.
\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
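    A small sketch of supplying the additional encryption context on a general purpose bucket follows; the context key-value pair is invented for illustration, and the header value is the Base64-encoded JSON described above.
    
    ```ts
    import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({});
    
    // Illustrative context only; for directory buckets the value would have to
    // match the default context (the bucket ARN), so no extra pairs are added there.
    const encryptionContext = Buffer.from(
      JSON.stringify({ "app:tenant": "tenant-42" })
    ).toString("base64");
    
    await client.send(
      new PutObjectCommand({
        Bucket: "amzn-s3-demo-bucket", // placeholder
        Key: "reports/q3.csv",
        Body: "id,total\n1,42\n",
        ServerSideEncryption: "aws:kms",
        SSEKMSEncryptionContext: encryptionContext,
      })
    );
    ```
    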
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.
Specifying this header with a PUT action doesn’t affect bucket-level settings for S3\n Bucket Key.
\nThis functionality is not supported for directory buckets.
\nSpecifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
\n\n General purpose buckets - Setting this header to\n true
causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3\n Bucket Key.
\n Directory buckets - S3 Bucket Keys are always enabled for GET
and PUT
    operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or \n import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
    
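    A minimal sketch of opting into an S3 Bucket Key for a single SSE-KMS object in a general purpose bucket (bucket and key names are placeholders); directory buckets need no such flag because Bucket Keys are always on there.
    
    ```ts
    import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({});
    
    // Reduces KMS request volume for SSE-KMS objects in general purpose buckets.
    await client.send(
      new PutObjectCommand({
        Bucket: "amzn-s3-demo-bucket", // placeholder
        Key: "data/object.json",
        Body: JSON.stringify({ ok: true }),
        ServerSideEncryption: "aws:kms",
        BucketKeyEnabled: true,
      })
    );
    ```
    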
Optional configuration to replicate existing source bucket objects. For more\n information, see Replicating Existing Objects in the Amazon S3 User Guide.\n
" + "smithy.api#documentation": "Optional configuration to replicate existing source bucket objects.\n
\nThis parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide.
\nServer-side encryption algorithm to use for the default encryption.
", + "smithy.api#documentation": "Server-side encryption algorithm to use for the default encryption.
\nFor directory buckets, there are only two supported values for server-side encryption: AES256
and aws:kms
.
Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default\n encryption. This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
or aws:kms:dsse
.
You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS\n key.
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key Alias: alias/alias-name
\n
If you use a key ID, you can run into a LogDestination undeliverable error when creating\n a VPC flow log.
\nIf you are using encryption with cross-account or Amazon Web Services service operations you must use\n a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
\nAmazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.
\nAmazon Web Services Key Management Service (KMS) customer managed key ID to use for the default\n encryption.
\n\n General purpose buckets - This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
or aws:kms:dsse
.
\n Directory buckets - This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
.
You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS\n key.
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key Alias: alias/alias-name
\n
If you are using encryption with cross-account or Amazon Web Services service operations, you must use\n a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
\n\n General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating\n a VPC flow log. \n
    \n\n Directory buckets - When you specify a KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
    
\nAmazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.
\nDescribes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more\n information, see PUT Bucket encryption in\n the Amazon S3 API Reference.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nDescribes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. For more\n information, see PutBucketEncryption.
\n\n General purpose buckets - If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key (aws/s3
) in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS.
    \n Directory buckets - Your SSE-KMS configuration can only support one customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3
    
) isn't supported. \n
\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS.
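    The default-encryption description above maps onto PutBucketEncryption in the generated client. The sketch below, with a placeholder bucket name and key ARN, sets SSE-KMS as the bucket default and enables the rule-level S3 Bucket Key covered next.
    
    ```ts
    import { S3Client, PutBucketEncryptionCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({});
    
    await client.send(
      new PutBucketEncryptionCommand({
        Bucket: "amzn-s3-demo-bucket", // placeholder
        ServerSideEncryptionConfiguration: {
          Rules: [
            {
              ApplyServerSideEncryptionByDefault: {
                SSEAlgorithm: "aws:kms",
                // Fully qualified key ARN (placeholder), as recommended above.
                KMSMasterKeyID:
                  "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
              },
              BucketKeyEnabled: true,
            },
          ],
        },
      })
    );
    ```
    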
\nSpecifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS\n (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the\n BucketKeyEnabled
element to true
causes Amazon S3 to use an S3\n Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
" + "smithy.api#documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS\n (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the\n BucketKeyEnabled
element to true
causes Amazon S3 to use an S3\n Bucket Key.
\n General purpose buckets - By default, S3 Bucket Key is not enabled. For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\n\n Directory buckets - S3 Bucket Keys are always enabled for GET
and PUT
operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or \n the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
Specifies the default server-side encryption configuration.
\nIf you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
\nSpecifies the default server-side encryption configuration.
\n\n General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.
    \n\n Directory buckets - When you specify a KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.
    
\nThe established temporary security credentials of the session.
\n\n Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint APIs on directory buckets.
\nThe established temporary security credentials of the session.
\n\n Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint API operations on directory buckets.
\nUploads a part in a multipart upload.
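    For reference, the session credentials described here are returned by CreateSession; a rough sketch (the directory bucket name is a placeholder) is shown below, although in practice the SDK creates and refreshes the session for you.
    
    ```ts
    import { S3Client, CreateSessionCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({ region: "us-west-2" });
    
    // Explicit call shown only to illustrate the credential shape; the SDK
    // normally performs this behind the scenes for Zonal endpoint operations.
    const { Credentials } = await client.send(
      new CreateSessionCommand({
        Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3", // placeholder directory bucket
        SessionMode: "ReadWrite",
      })
    );
    console.log(Credentials?.Expiration);
    ```
    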
\nIn this operation, you provide new data as a part of an object in your request. However, you have an option\n to specify your existing Amazon S3 object as a data source for the part you are uploading. To\n upload a part from an existing object, you use the UploadPartCopy operation.\n
\nYou must initiate a multipart upload (see CreateMultipartUpload)\n before you can upload any part. In response to your initiate request, Amazon S3 returns an\n upload ID, a unique identifier that you must include in your upload part request.
\nPart numbers can be any number from 1 to 10,000, inclusive. A part number uniquely\n identifies a part and also defines its position within the object being created. If you\n upload a new part using the same part number that was used with a previous part, the\n previously uploaded part is overwritten.
\nFor information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.
\nAfter you initiate multipart upload and upload\n one or more parts, you must either complete or abort multipart upload in order to stop\n getting charged for storage of the uploaded parts. Only after you either complete or abort\n multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts\n storage.
\nFor more information on multipart uploads, go to Multipart Upload Overview in the\n Amazon S3 User Guide .
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To\n perform a multipart upload with encryption using an Key Management Service key, the\n requester must have permission to the kms:Decrypt
and\n kms:GenerateDataKey
actions on the key. The requester must\n also have permissions for the kms:GenerateDataKey
action for\n the CreateMultipartUpload
API. Then, the requester needs\n permissions for the kms:Decrypt
action on the\n UploadPart
and UploadPartCopy
APIs.
These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For\n more information about KMS permissions, see Protecting data\n using server-side encryption with KMS in the\n Amazon S3 User Guide. For information about the\n permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the\n Amazon S3 User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
\n .
\n General purpose bucket - To ensure that data is not corrupted traversing the network, specify the\n Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the\n x-amz-content-sha256
header as a checksum instead of\n Content-MD5
. For more information see Authenticating\n Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
\n Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
\n\n General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. You have \n mutually exclusive options to protect data using server-side encryption in Amazon S3, depending\n on how you choose to manage the encryption keys. Specifically, the encryption key options\n are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys\n (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by\n default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption\n with other key options. The option you use depends on whether you want to use KMS keys\n (SSE-KMS) or provide your own encryption key (SSE-C).
\nServer-side encryption is supported by the S3 Multipart Upload operations. Unless you are\n using a customer-provided encryption key (SSE-C), you don't need to specify the encryption\n parameters in each UploadPart request. Instead, you only need to specify the server-side\n encryption parameters in the initial Initiate Multipart request. For more information, see\n CreateMultipartUpload.
\nIf you request server-side encryption using a customer-provided encryption key (SSE-C)\n in your initiate multipart upload request, you must provide identical encryption\n information in each part upload using the following request headers.
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\n\n Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
\n For more information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.
\nError Code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
\nHTTP Status Code: 404 Not Found
\nSOAP Fault Code Prefix: Client
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPart
:
\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nUploads a part in a multipart upload.
\nIn this operation, you provide new data as a part of an object in your request. However, you have an option\n to specify your existing Amazon S3 object as a data source for the part you are uploading. To\n upload a part from an existing object, you use the UploadPartCopy operation.\n
\nYou must initiate a multipart upload (see CreateMultipartUpload)\n before you can upload any part. In response to your initiate request, Amazon S3 returns an\n upload ID, a unique identifier that you must include in your upload part request.
\nPart numbers can be any number from 1 to 10,000, inclusive. A part number uniquely\n identifies a part and also defines its position within the object being created. If you\n upload a new part using the same part number that was used with a previous part, the\n previously uploaded part is overwritten.
\nFor information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.
\nAfter you initiate multipart upload and upload\n one or more parts, you must either complete or abort multipart upload in order to stop\n getting charged for storage of the uploaded parts. Only after you either complete or abort\n multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts\n storage.
\nFor more information on multipart uploads, go to Multipart Upload Overview in the\n Amazon S3 User Guide .
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - To\n perform a multipart upload with encryption using an Key Management Service key, the\n requester must have permission to the kms:Decrypt
and\n kms:GenerateDataKey
actions on the key. The requester must\n also have permissions for the kms:GenerateDataKey
action for\n the CreateMultipartUpload
API. Then, the requester needs\n permissions for the kms:Decrypt
action on the\n UploadPart
and UploadPartCopy
APIs.
These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For\n more information about KMS permissions, see Protecting data\n using server-side encryption with KMS in the\n Amazon S3 User Guide. For information about the\n permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the\n Amazon S3 User Guide.
\n\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession
\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
    API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession
    
\n .
If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
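    To tie the permission and upload-flow notes above together, here is a hedged end-to-end sketch with the generated client (bucket and key names are placeholders): initiate the upload, send one part, and complete it. With SSE-KMS, the kms:GenerateDataKey and kms:Decrypt permissions described above apply to these calls.
    
    ```ts
    import {
      S3Client,
      CreateMultipartUploadCommand,
      UploadPartCommand,
      CompleteMultipartUploadCommand,
    } from "@aws-sdk/client-s3";
    
    const client = new S3Client({});
    const Bucket = "amzn-s3-demo-bucket"; // placeholder
    const Key = "videos/session.mp4";
    
    const { UploadId } = await client.send(
      new CreateMultipartUploadCommand({ Bucket, Key })
    );
    
    // Part numbers run from 1 to 10,000; re-using a number overwrites that part.
    const { ETag } = await client.send(
      new UploadPartCommand({
        Bucket,
        Key,
        UploadId,
        PartNumber: 1,
        Body: Buffer.alloc(5 * 1024 * 1024), // placeholder payload
      })
    );
    
    // Complete (or abort) so the uploaded parts stop accruing storage charges.
    await client.send(
      new CompleteMultipartUploadCommand({
        Bucket,
        Key,
        UploadId,
        MultipartUpload: { Parts: [{ ETag, PartNumber: 1 }] },
      })
    );
    ```
    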
\n General purpose bucket - To ensure that data is not corrupted traversing the network, specify the\n Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the\n x-amz-content-sha256
header as a checksum instead of\n Content-MD5
    . For more information, see Authenticating\n Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
    
\n Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
\n\n General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. You have \n mutually exclusive options to protect data using server-side encryption in Amazon S3, depending\n on how you choose to manage the encryption keys. Specifically, the encryption key options\n are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys\n (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by\n default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption\n with other key options. The option you use depends on whether you want to use KMS keys\n (SSE-KMS) or provide your own encryption key (SSE-C).
\nServer-side encryption is supported by the S3 Multipart Upload operations. Unless you are\n using a customer-provided encryption key (SSE-C), you don't need to specify the encryption\n parameters in each UploadPart request. Instead, you only need to specify the server-side\n encryption parameters in the initial Initiate Multipart request. For more information, see\n CreateMultipartUpload.
\nIf you request server-side encryption using a customer-provided encryption key (SSE-C)\n in your initiate multipart upload request, you must provide identical encryption\n information in each part upload using the following request headers.
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\n\n For more information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
).
Error Code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
\nHTTP Status Code: 404 Not Found
\nSOAP Fault Code Prefix: Client
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPart
:
\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nUploads a part by copying data from an existing object as data source. To specify the\n data source, you add the request header x-amz-copy-source
in your request. To specify \n a byte range, you add the request header x-amz-copy-source-range
in your\n request.
For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.
\nInstead of copying data from an existing object as part data, you might use the UploadPart\n action to upload new data as a part of an object in your request.
\nYou must initiate a multipart upload before you can upload any part. In response to your\n initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in\n your upload part request.
\nFor conceptual information about multipart uploads, see Uploading\n Objects Using Multipart Upload in the\n Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart\n upload, see Operations on Objects in\n the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\nYou must have READ
access to the source object and WRITE
\n access to the destination bucket.
\n General purpose bucket permissions - You\n must have the permissions in a policy based on the bucket types of your\n source bucket and destination bucket in an UploadPartCopy
\n operation.
If the source object is in a general purpose bucket, you must have the\n \n s3:GetObject
\n \n permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the\n \n s3:PutObject
\n \n permission to write the object copy to the destination bucket.
To perform a multipart upload with encryption using an Key Management Service\n key, the requester must have permission to the\n kms:Decrypt
and kms:GenerateDataKey
\n actions on the key. The requester must also have permissions for the\n kms:GenerateDataKey
action for the\n CreateMultipartUpload
API. Then, the requester needs\n permissions for the kms:Decrypt
action on the\n UploadPart
and UploadPartCopy
APIs. These\n permissions are required because Amazon S3 must decrypt and read data from\n the encrypted file parts before it completes the multipart upload. For\n more information about KMS permissions, see Protecting\n data using server-side encryption with KMS in the\n Amazon S3 User Guide. For information about the\n permissions required to use the multipart upload API, see Multipart upload\n and permissions and Multipart upload API and permissions in the\n Amazon S3 User Guide.
\n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession
\n permission in\n the Action
element of a policy to read the object. By\n default, the session is in the ReadWrite
mode. If you\n want to restrict the access, you can explicitly set the\n s3express:SessionMode
condition key to\n ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the \n \n s3express:CreateSession
\n permission in the\n Action
element of a policy to write the object\n to the destination. The s3express:SessionMode
condition\n key cannot be set to ReadOnly
on the copy destination.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
\n\n General purpose buckets - \n \n For information about using server-side encryption with customer-provided\n encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.\n
\n Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
Error Code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The\n upload ID might be invalid, or the multipart upload might have been\n aborted or completed.
\nHTTP Status Code: 404 Not Found
\nError Code: InvalidRequest
\n
Description: The specified copy source is not supported as a\n byte-range copy source.
\nHTTP Status Code: 400 Bad Request
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPartCopy
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nUploads a part by copying data from an existing object as data source. To specify the\n data source, you add the request header x-amz-copy-source
in your request. To specify \n a byte range, you add the request header x-amz-copy-source-range
in your\n request.
For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.
\nInstead of copying data from an existing object as part data, you might use the UploadPart\n action to upload new data as a part of an object in your request.
\nYou must initiate a multipart upload before you can upload any part. In response to your\n initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in\n your upload part request.
\nFor conceptual information about multipart uploads, see Uploading\n Objects Using Multipart Upload in the\n Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart\n upload, see Operations on Objects in\n the Amazon S3 User Guide.
\n\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed. For more information, see REST Authentication.
\n Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the \n temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
\nYou must have READ
access to the source object and WRITE
\n access to the destination bucket.
\n General purpose bucket permissions - You\n must have the permissions in a policy based on the bucket types of your\n source bucket and destination bucket in an UploadPartCopy
\n operation.
If the source object is in a general purpose bucket, you must have the\n \n s3:GetObject
\n \n permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the\n \n s3:PutObject
\n \n permission to write the object copy to the destination bucket.
    To perform a multipart upload with encryption using a Key Management Service\n key, the requester must have permission to use the\n kms:Decrypt
    
and kms:GenerateDataKey
\n actions on the key. The requester must also have permissions for the\n kms:GenerateDataKey
action for the\n CreateMultipartUpload
API. Then, the requester needs\n permissions for the kms:Decrypt
action on the\n UploadPart
and UploadPartCopy
APIs. These\n permissions are required because Amazon S3 must decrypt and read data from\n the encrypted file parts before it completes the multipart upload. For\n more information about KMS permissions, see Protecting\n data using server-side encryption with KMS in the\n Amazon S3 User Guide. For information about the\n permissions required to use the multipart upload API, see Multipart upload\n and permissions and Multipart upload API and permissions in the\n Amazon S3 User Guide.
\n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession
\n permission in\n the Action
element of a policy to read the object. By\n default, the session is in the ReadWrite
mode. If you\n want to restrict the access, you can explicitly set the\n s3express:SessionMode
condition key to\n ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the \n \n s3express:CreateSession
\n permission in the\n Action
element of a policy to write the object\n to the destination. The s3express:SessionMode
condition\n key cannot be set to ReadOnly
on the copy destination.
If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.
\n\n General purpose buckets - \n \n For information about using server-side encryption with customer-provided\n encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.\n
\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) and server-side encryption with KMS keys (SSE-KMS) (aws:kms
). For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
For directory buckets, when you perform a CreateMultipartUpload
operation and an UploadPartCopy
operation, \n the request headers you provide in the CreateMultipartUpload
request must match the default encryption configuration of the destination bucket.
    S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.
    
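    As a concrete aside for the copy mechanics documented above, the sketch below copies the first 5 MiB of an existing object into part 1 of an in-progress multipart upload; the bucket names, key, and upload ID are placeholders.
    
    ```ts
    import { S3Client, UploadPartCopyCommand } from "@aws-sdk/client-s3";
    
    const client = new S3Client({});
    
    // CopySource is "source-bucket/source-key"; CopySourceRange selects the bytes.
    const { CopyPartResult } = await client.send(
      new UploadPartCopyCommand({
        Bucket: "amzn-s3-demo-destination-bucket", // placeholder
        Key: "merged/output.bin",
        UploadId: "exampleUploadId", // placeholder from CreateMultipartUpload
        PartNumber: 1,
        CopySource: "amzn-s3-demo-source-bucket/source-object.bin",
        CopySourceRange: "bytes=0-5242879",
      })
    );
    console.log(CopyPartResult?.ETag);
    ```
    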
\nError Code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The\n upload ID might be invalid, or the multipart upload might have been\n aborted or completed.
\nHTTP Status Code: 404 Not Found
\nError Code: InvalidRequest
\n
Description: The specified copy source is not supported as a\n byte-range copy source.
\nHTTP Status Code: 400 Bad Request
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPartCopy
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThe server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -33944,7 +34000,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256
, aws:kms
).
If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.
\nThis functionality is not supported for directory buckets.
\nIf present, indicates the ID of the KMS key that was used for object encryption.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
\nThis functionality is not supported for directory buckets.
\nIndicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } },