From 8290f86082cbebf53b0cc5ee02f82319dd37d195 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Tue, 13 Feb 2024 17:23:07 +0800 Subject: [PATCH 1/5] HDDS-9680. Use md5 hash of multipart object part's content as ETag (#5668) (cherry picked from commit 7370676dcf476a01094f7efa97aba98780a5073f) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 5 + .../content/feature/S3-Tenant-Commands.md | 2 +- .../OzoneMultipartUploadPartListParts.java | 9 +- .../hadoop/ozone/client/rpc/RpcClient.java | 3 +- .../OmMultipartCommitUploadPartInfo.java | 11 +- .../OmMultipartUploadCompleteList.java | 5 +- .../helpers/OmMultipartUploadListParts.java | 3 +- .../hadoop/ozone/om/helpers/OmPartInfo.java | 11 +- ...ManagerProtocolClientSideTranslatorPB.java | 3 +- .../main/smoketest/s3/MultipartUpload.robot | 33 ++-- .../TestOzoneFSWithObjectStoreCreate.java | 9 +- .../hadoop/ozone/TestMultipartObjectGet.java | 4 +- .../client/rpc/TestOzoneAtRestEncryption.java | 18 +- ...TestOzoneClientMultipartUploadWithFSO.java | 172 ++++++++++-------- .../rpc/TestOzoneRpcClientAbstract.java | 128 ++++++++----- .../rpc/TestOzoneRpcClientWithRatis.java | 10 +- .../ozone/om/TestObjectStoreWithLegacyFS.java | 11 +- .../TestOzoneManagerHAWithStoppedNodes.java | 10 +- .../src/main/proto/OmClientProtocol.proto | 5 +- .../hadoop/ozone/om/KeyManagerImpl.java | 6 +- .../S3MultipartUploadCommitPartRequest.java | 10 +- .../S3MultipartUploadCompleteRequest.java | 85 +++++++-- .../ozone/om/request/OMRequestTestUtils.java | 38 +++- .../TestS3MultipartUploadCompleteRequest.java | 16 +- .../s3/multipart/TestS3MultipartResponse.java | 2 +- .../TestMultipartUploadCleanupService.java | 4 + .../om/service/TestOpenKeyCleanupService.java | 4 + .../ozone/s3/commontypes/KeyMetadata.java | 4 +- .../CompleteMultipartUploadRequest.java | 10 +- .../CompleteMultipartUploadResponse.java | 3 +- .../ozone/s3/endpoint/CopyObjectResponse.java | 3 +- .../ozone/s3/endpoint/CopyPartResult.java | 3 +- .../ozone/s3/endpoint/EndpointBase.java | 3 +- .../ozone/s3/endpoint/ListPartsResponse.java | 3 +- .../ozone/s3/endpoint/ObjectEndpoint.java | 13 +- .../s3/endpoint/ObjectEndpointStreaming.java | 21 +-- .../hadoop/ozone/client/OzoneBucketStub.java | 32 +++- .../client/OzoneDataStreamOutputStub.java | 4 +- .../ozone/client/OzoneOutputStreamStub.java | 5 +- ...eteMultipartUploadRequestUnmarshaller.java | 4 +- .../ozone/s3/endpoint/TestListParts.java | 6 +- .../endpoint/TestMultipartUploadComplete.java | 6 +- .../endpoint/TestMultipartUploadWithCopy.java | 13 +- .../ozone/s3/endpoint/TestPartUpload.java | 10 +- .../s3/endpoint/TestPartUploadWithStream.java | 11 +- 45 files changed, 519 insertions(+), 252 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 4f0f800dfdd..70566767eab 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -598,4 +598,9 @@ private OzoneConsts() { */ public static final String COMPACTION_LOG_TABLE = "compactionLogTable"; + + /** + * S3G multipart upload request's ETag header key. 
+ */ + public static final String ETAG = "ETag"; } diff --git a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md index f9ea5f60846..23c01551503 100644 --- a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md +++ b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md @@ -432,7 +432,7 @@ bash-4.2$ aws s3api --endpoint-url http://s3g:9878 list-objects --bucket bucket- { "Key": "file1", "LastModified": "2022-02-16T00:10:00.000Z", - "ETag": "2022-02-16T00:10:00.000Z", + "ETag": "e99f93dedfe22e9a133dc3c634f14634", "Size": 3811, "StorageClass": "STANDARD" } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java index c1902cdb60d..67f8edf3140 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java @@ -104,12 +104,15 @@ public static class PartInfo { private String partName; private long modificationTime; private long size; + private String eTag; - public PartInfo(int number, String name, long time, long size) { + public PartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -127,5 +130,9 @@ public long getModificationTime() { public long getSize() { return size; } + + public String getETag() { + return eTag; + } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 953b8489a1b..8d61f8ef860 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1948,7 +1948,8 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, ozoneMultipartUploadPartListParts.addPart( new OzoneMultipartUploadPartListParts.PartInfo( omPartInfo.getPartNumber(), omPartInfo.getPartName(), - omPartInfo.getModificationTime(), omPartInfo.getSize())); + omPartInfo.getModificationTime(), omPartInfo.getSize(), + omPartInfo.getETag())); } return ozoneMultipartUploadPartListParts; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java index 646cb421e43..bbf1a1bdae5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java @@ -24,8 +24,15 @@ public class OmMultipartCommitUploadPartInfo { private final String partName; - public OmMultipartCommitUploadPartInfo(String name) { - this.partName = name; + private final String eTag; + + public OmMultipartCommitUploadPartInfo(String partName, String eTag) { + this.partName = partName; + this.eTag = eTag; + } + + public String getETag() { + return eTag; } public String getPartName() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java index 63e6353c185..ff39661d01b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java @@ -56,8 +56,9 @@ public Map getMultipartMap() { */ public List getPartsList() { List partList = new ArrayList<>(); - multipartMap.forEach((partNumber, partName) -> partList.add(Part - .newBuilder().setPartName(partName).setPartNumber(partNumber).build())); + multipartMap.forEach((partNumber, eTag) -> partList.add(Part + // set partName equal to eTag for back compatibility (partName is a required property) + .newBuilder().setPartName(eTag).setETag(eTag).setPartNumber(partNumber).build())); return partList; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java index fbf519c2268..0ba0e26acda 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java @@ -79,6 +79,7 @@ public void addPartList(List partInfos) { public void addProtoPartList(List partInfos) { partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo( partInfo.getPartNumber(), partInfo.getPartName(), - partInfo.getModificationTime(), partInfo.getSize()))); + partInfo.getModificationTime(), partInfo.getSize(), + partInfo.getETag()))); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index 2d753a5caa5..e908c5a025f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -28,12 +28,15 @@ public class OmPartInfo { private String partName; private long modificationTime; private long size; + private String eTag; - public OmPartInfo(int number, String name, long time, long size) { + public OmPartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -52,9 +55,13 @@ public long getSize() { return size; } + public String getETag() { + return eTag; + } + public PartInfo getProto() { return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) .setModificationTime(modificationTime) - .setSize(size).build(); + .setSize(size).setETag(eTag).build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index eae36091bdb..3be8de2ceea 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1628,7 +1628,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( .getCommitMultiPartUploadResponse(); 
OmMultipartCommitUploadPartInfo info = new - OmMultipartCommitUploadPartInfo(response.getPartName()); + OmMultipartCommitUploadPartInfo(response.getPartName(), + response.getETag()); return info; } diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index 04cce8fefcd..3a6ae0e45d4 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -94,21 +94,28 @@ Test Multipart Upload Complete Should contain ${result} UploadId #upload parts - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + Run Keyword Create Random file 5 + ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} + ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should contain ${result} ETag + ${part1Md5Sum} = Execute md5sum /tmp/part1 | awk '{print $1}' + Should Be Equal As Strings ${eTag1} ${part1Md5Sum} + + Execute echo "Part2" > /tmp/part2 + ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} + ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should contain ${result} ETag + ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' + Should Be Equal As Strings ${eTag2} ${part2Md5Sum} #complete multipart upload - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey1 - Should contain ${result} ETag + ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Should contain ${result} ${BUCKET} + Should contain ${result} ${PREFIX}/multipartKey1 + ${resultETag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + ${expectedResultETag} = Execute echo -n ${eTag1}${eTag2} | md5sum | awk '{print $1}' + Should contain ${result} ETag + Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2" #read file and check the key ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 71d1e4bdddd..50e361ef180 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -49,6 +50,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -58,6 +60,8 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -302,10 +306,13 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() // This should succeed, as we check during creation of part or during // complete MPU. + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(MD5_HASH) + .digest(b)).toLowerCase()); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); // Should fail, as we have directory with same name. try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 37cc7a3411f..d3b212ed864 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -146,11 +146,11 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, Response response = REST.put(BUCKET, KEY, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); CompleteMultipartUploadRequest.Part part = new CompleteMultipartUploadRequest.Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 0d9aedeab04..5e93bda5a84 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -22,6 +22,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.time.Instant; import java.util.ArrayList; @@ -35,6 +36,7 @@ import com.google.common.cache.Cache; import org.apache.hadoop.conf.StorageUnit; +import javax.xml.bind.DatatypeConverter; import 
org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; @@ -117,6 +119,7 @@ class TestOzoneAtRestEncryption { private static final int DEFAULT_CRYPTO_BUFFER_SIZE = 8 * 1024; // 8KB // (this is the default Crypto Buffer size as determined by the config // hadoop.security.crypto.buffer.size) + private static MessageDigest eTagProvider; @BeforeAll static void init() throws Exception { @@ -166,6 +169,7 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } @AfterAll @@ -647,14 +651,17 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); + multipartStreamKey.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); multipartStreamKey.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = multipartStreamKey.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private String uploadPart(OzoneBucket bucket, String keyName, @@ -662,14 +669,17 @@ private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private void completeMultipartUpload(OzoneBucket bucket, String keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index cc7864a3b53..c50cd4b2f26 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -17,8 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; + +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -98,6 +104,7 @@ public class 
TestOzoneClientMultipartUploadWithFSO { private static ObjectStore store = null; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; + private static MessageDigest eTagProvider; private static String scmId = UUID.randomUUID().toString(); @@ -124,6 +131,7 @@ public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } /** @@ -163,7 +171,7 @@ static void shutdownCluster() throws IOException { cluster.shutdown(); } } - + @Before public void preTest() throws Exception { volumeName = UUID.randomUUID().toString(); @@ -222,13 +230,14 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); Assert.assertNotNull(commitUploadPartInfo); - Assert.assertNotNull(commitUploadPartInfo.getPartName()); + Assert.assertNotNull(commitUploadPartInfo.getETag()); } @Test @@ -238,12 +247,12 @@ public void testUploadPartOverrideWithRatis() throws Exception { ReplicationType.RATIS, THREE); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - sampleData.getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, sampleData.getBytes(UTF_8)); //Overwrite the part by creating part key with same part number. - String partNameNew = uploadPart(bucket, keyName, uploadID, partNumber, - "name".getBytes(UTF_8)); + Pair partNameAndETagNew = uploadPart(bucket, keyName, + uploadID, partNumber, "name".getBytes(UTF_8)); // PartName should be same from old part Name. // AWS S3 for same content generates same partName during upload part. @@ -253,8 +262,10 @@ public void testUploadPartOverrideWithRatis() throws Exception { // So, when a part is override partNames will still be same irrespective // of content in ozone s3. This will make S3 Mpu completeMPU pass when // comparing part names and large file uploads work using aws cp. 
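The comment block above is the crux of this test change: Ozone keeps the part name stable when the same part number is overwritten, while the newly introduced ETag is an MD5 of the part content and therefore changes once different bytes are uploaded. A minimal sketch of the invariant the updated assertions below verify, assuming this test class's uploadPart helper (which, after this patch, returns a Pair of part name and ETag) and the JUnit4 Assert already used here; the byte contents are illustrative values only:

    // Upload the same part number twice with different content.
    Pair<String, String> first = uploadPart(bucket, keyName, uploadID, 1,
        "data-A".getBytes(UTF_8));
    Pair<String, String> overwritten = uploadPart(bucket, keyName, uploadID, 1,
        "data-B".getBytes(UTF_8));

    // The part name stays the same across the overwrite...
    Assert.assertEquals(first.getKey(), overwritten.getKey());
    // ...but the content-based ETag differs, because md5(content) differs.
    Assert.assertNotEquals(first.getValue(), overwritten.getValue());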
- Assert.assertEquals("Part names should be same", partName, - partNameNew); + Assert.assertEquals(partNameAndETag.getKey(), partNameAndETagNew.getKey()); + + // ETags are not equal due to content differences + Assert.assertNotEquals(partNameAndETag.getValue(), partNameAndETagNew.getValue()); // old part bytes written needs discard and have only // new part bytes in quota for this bucket @@ -264,7 +275,8 @@ public void testUploadPartOverrideWithRatis() throws Exception { } @Test - public void testUploadTwiceWithEC() throws IOException { + public void testUploadTwiceWithEC() + throws IOException, NoSuchAlgorithmException { bucketName = UUID.randomUUID().toString(); bucket = getOzoneECBucket(bucketName); @@ -275,12 +287,12 @@ public void testUploadTwiceWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - data); - - Map partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, data); + + Map eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); @@ -291,12 +303,12 @@ public void testUploadTwiceWithEC() throws IOException { multipartInfo = bucket.initiateMultipartUpload(keyName); uploadID = multipartInfo.getUploadID(); - partName = uploadPart(bucket, keyName, uploadID, partNumber, + partNameAndETag = uploadPart(bucket, keyName, uploadID, partNumber, data); - partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); // used sized should remain same, overwrite previous upload Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), @@ -304,7 +316,8 @@ public void testUploadTwiceWithEC() throws IOException { } @Test - public void testUploadAbortWithEC() throws IOException { + public void testUploadAbortWithEC() + throws IOException, NoSuchAlgorithmException { byte[] data = generateData(81920, (byte) 97); bucketName = UUID.randomUUID().toString(); @@ -316,7 +329,7 @@ public void testUploadAbortWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; uploadPart(bucket, keyName, uploadID, partNumber, data); - + long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); Assert.assertEquals(volume.getBucket(bucketName).getUsedBytes(), @@ -339,7 +352,7 @@ private OzoneBucket getOzoneECBucket(String myBucket) volume.createBucket(myBucket, bucketArgs.build()); return volume.getBucket(myBucket); } - + @Test public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { // Initiate multipart upload @@ -347,19 +360,19 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { ONE); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); + eTagsMap.put(1, 
partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMap.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -370,22 +383,24 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() byte[] data = generateData(10000000, (byte) 97); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); - // Upload part 1 and add it to the partsMap for completing the upload. - String partName1 = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName1); + // Upload part 1 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, data); + eTagsMap.put(1, partNameAndETag1.getValue()); - // Upload part 2 and add it to the partsMap for completing the upload. - String partName2 = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName2); + // Upload part 2 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, data); + eTagsMap.put(2, partNameAndETag2.getValue()); - // Upload part 3 but do not add it to the partsMap. + // Upload part 3 but do not add it to the eTagsMap. uploadPart(bucket, keyName, uploadID, 3, data); - completeMultipartUpload(bucket, keyName, uploadID, partsMap); + completeMultipartUpload(bucket, keyName, uploadID, eTagsMap); - // Check the bucket size. Since part number 3 was not added to the partsMap, + // Check the bucket size. Since part number 3 was not added to the eTagsMap, // the unused part size should be discarded from the bucket size, // 30000000 - 10000000 = 20000000 long bucketSize = volume.getBucket(bucketName).getUsedBytes(); @@ -472,6 +487,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -479,11 +497,14 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, - data.length, 2, uploadID); + data.length, 2, uploadID); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -569,12 +590,13 @@ public void testAbortUploadSuccessWithParts() throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager(); - String multipartKey = verifyUploadedPart(uploadID, partName, metadataMgr); + String multipartKey = verifyUploadedPart(uploadID, partNameAndETag.getKey(), + metadataMgr); bucket.abortMultipartUpload(keyName, uploadID); @@ -601,17 +623,17 @@ public void testListMultipartUploadParts() throws Exception { Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -669,7 +691,6 @@ private void verifyPartNamesInDB(Map partsMap, listPartNames.remove(partKeyName); } - Assert.assertTrue("Wrong partKeyName format in DB!", listPartNames.isEmpty()); } @@ -692,17 +713,17 @@ public void testListMultipartUploadPartsWithContinuation() Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String 
partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -776,9 +797,9 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() bucket.listParts(keyName, uploadID, 100, 2); // Should return empty - Assert.assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); + Assert.assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); @@ -950,22 +971,29 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, return uploadID; } - private String uploadPart(OzoneBucket oBucket, String kName, String - uploadID, int partNumber, byte[] data) throws IOException { + private Pair uploadPart(OzoneBucket oBucket, String kName, + String uploadID, int partNumber, + byte[] data) + throws IOException, NoSuchAlgorithmException { OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, - data.length, partNumber, uploadID); - ozoneOutputStream.write(data, 0, - data.length); + data.length, partNumber, uploadID); + ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); Assert.assertNotNull(omMultipartCommitUploadPartInfo); + Assert.assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } private void completeMultipartUpload(OzoneBucket oBucket, String kName, @@ -975,9 +1003,9 @@ private void completeMultipartUpload(OzoneBucket oBucket, String kName, Assert.assertNotNull(omMultipartUploadCompleteInfo); Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket - .getName()); + .getName()); Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket - .getVolumeName()); + .getVolumeName()); Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), kName); Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 6ef54e8faf4..7d6d610207d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -20,6 +20,8 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; @@ -38,6 +40,9 @@ import java.util.concurrent.atomic.AtomicInteger; import 
java.util.stream.Stream; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -136,7 +141,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; @@ -157,6 +164,7 @@ import static org.slf4j.event.Level.DEBUG; import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; @@ -190,6 +198,12 @@ public abstract class TestOzoneRpcClientAbstract { READ, ACCESS); private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, remoteGroupName, READ, ACCESS); + private static MessageDigest eTagProvider; + + @BeforeAll + public static void initialize() throws NoSuchAlgorithmException { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } private static String scmId = UUID.randomUUID().toString(); private static String clusterId; @@ -1519,6 +1533,7 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); assertEquals(valueLength, store.getVolume(volumeName) @@ -2684,13 +2699,14 @@ void testUploadPartWithNoOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @ParameterizedTest @@ -2721,6 +2737,7 @@ void testUploadPartOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2728,7 +2745,7 @@ void testUploadPartOverride(ReplicationConfig replication) assertNotNull(commitUploadPartInfo); String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // Overwrite the part by creating part key with same part 
number // and different content. @@ -2736,13 +2753,14 @@ void testUploadPartOverride(ReplicationConfig replication) ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // AWS S3 for same content generates same partName during upload part. // In AWS S3 ETag is generated from md5sum. In Ozone right now we @@ -2872,12 +2890,13 @@ public void testMultipartUploadWithACL() throws Exception { // Upload part byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 1); - String partName = uploadPart(bucket, keyName2, uploadId, 1, data); - Map partsMap = new TreeMap<>(); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName2, + uploadId, 1, data); + Map eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, partNameAndETag.getValue()); // Complete multipart upload request - completeMultipartUpload(bucket2, keyName2, uploadId, partsMap); + completeMultipartUpload(bucket2, keyName2, uploadId, eTagsMaps); // User without permission cannot read multi-uploaded object try (OzoneInputStream ignored = bucket2.readKey(keyName)) { @@ -2928,21 +2947,21 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { anyReplication()); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMaps = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, + uploadID, 1, "data".getBytes(UTF_8)); + eTagsMaps.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMaps.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @Test @@ -2989,11 +3008,11 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); + TreeMap eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, DigestUtils.md5Hex(UUID.randomUUID().toString())); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @@ -3015,11 +3034,11 @@ public void testMultipartUploadWithMissingParts() throws Exception { uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. 
- TreeMap partsMap = new TreeMap<>(); - partsMap.put(3, "random"); + TreeMap eTagsMap = new TreeMap<>(); + eTagsMap.put(3, DigestUtils.md5Hex("random")); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -3125,6 +3144,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -3133,10 +3155,13 @@ void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -3213,17 +3238,17 @@ void testListMultipartUploadParts(ReplicationConfig replication) Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -3264,17 +3289,17 @@ void testListMultipartUploadPartsWithContinuation( Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, 
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -3747,19 +3772,20 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, // than 5mb int length = 0; byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val); - String partName = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName); + Pair partNameAndEtag = uploadPart(bucket, keyName, uploadID, + 1, data); + partsMap.put(1, partNameAndEtag.getValue()); length += data.length; - partName = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName); + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 2, data); + partsMap.put(2, partNameAndEtag.getValue()); length += data.length; String part3 = UUID.randomUUID().toString(); - partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( UTF_8)); - partsMap.put(3, partName); + partsMap.put(3, partNameAndEtag.getValue()); length += part3.getBytes(UTF_8).length; // Complete multipart upload request @@ -3816,20 +3842,26 @@ private String initiateMultipartUpload(OzoneBucket bucket, String keyName, return uploadID; } - private String uploadPart(OzoneBucket bucket, String keyName, String - uploadID, int partNumber, byte[] data) throws Exception { + private Pair uploadPart(OzoneBucket bucket, String keyName, + String uploadID, int partNumber, + byte[] data) throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index 9fb87cbaa6f..c3bbd793dc1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -24,6 +24,8 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.HashMap; import java.util.UUID; @@ -31,6 +33,7 @@ import 
java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; @@ -38,6 +41,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -180,7 +184,8 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { } @Test - public void testMultiPartUploadWithStream() throws IOException { + public void testMultiPartUploadWithStream() + throws IOException, NoSuchAlgorithmException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); @@ -213,6 +218,9 @@ public void testMultiPartUploadWithStream() throws IOException { keyName, valueLength, 1, uploadID); ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0, valueLength); + ozoneStreamOutput.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(sampleData)).toLowerCase()); ozoneStreamOutput.close(); OzoneMultipartUploadPartListParts parts = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index 0b00f9b5780..1d80ae6d83d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; @@ -52,6 +53,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; @@ -215,7 +218,8 @@ public void testMultiPartCompleteUpload() throws Exception { } private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( - OzoneBucket bucket, String keyName) throws IOException { + OzoneBucket bucket, String keyName) + throws IOException, NoSuchAlgorithmException { OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); @@ -228,6 +232,9 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(data)).toLowerCase()); ozoneOutputStream.close(); if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { @@ -247,7 +254,7 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( ozoneOutputStream.getCommitUploadPartInfo(); Map partsMap 
= new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index 1a65d5d0653..735f5d25b9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -24,6 +25,7 @@ import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -185,11 +187,12 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -361,7 +364,7 @@ private void validateListParts(OzoneBucket ozoneBucket, String keyName, for (int i = 0; i < partsMap.size(); i++) { assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), - partInfoList.get(i).getPartName()); + partInfoList.get(i).getETag()); } @@ -378,9 +381,10 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); - return ozoneOutputStream.getCommitUploadPartInfo().getPartName(); + return ozoneOutputStream.getCommitUploadPartInfo().getETag(); } @Test diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index fd83981507c..9efb64a2a4d 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1582,8 +1582,9 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - // This one is returned as Etag for S3. optional string partName = 1; + // This one is returned as Etag for S3. 
+ optional string eTag = 2; } message MultipartUploadCompleteRequest { @@ -1601,6 +1602,7 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; required string partName = 2; + optional string eTag = 3; } message MultipartUploadAbortRequest { @@ -1673,6 +1675,7 @@ message PartInfo { required string partName = 2; required uint64 modificationTime = 3; required uint64 size = 4; + optional string eTag = 5; } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index da3d1a17ddd..c7d675dddcb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -120,6 +120,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -790,7 +791,10 @@ public OmMultipartUploadListParts listParts(String volumeName, OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), - partKeyInfo.getPartKeyInfo().getDataSize()); + partKeyInfo.getPartKeyInfo().getDataSize(), + partKeyInfo.getPartKeyInfo().getMetadataList().stream() + .filter(keyValue -> keyValue.getKey().equals(ETAG)) + .findFirst().get().getValue()); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 60920568933..bc3ef718c61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -239,9 +239,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, correctedSpace); omBucketInfo.incrUsedBytes(correctedSpace); - omResponse.setCommitMultiPartUploadResponse( - MultipartCommitUploadPartResponse.newBuilder() - .setPartName(partName)); + MultipartCommitUploadPartResponse.Builder commitResponseBuilder = MultipartCommitUploadPartResponse.newBuilder() + .setPartName(partName); + String eTag = omKeyInfo.getMetadata().get(OzoneConsts.ETAG); + if (eTag != null) { + commitResponseBuilder.setETag(eTag); + } + omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder); omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index de9340c237b..451599dee8e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -27,6 +27,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -80,6 +82,32 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); + private BiFunction<OzoneManagerProtocolProtos.Part, PartKeyInfo, MultipartCommitRequestPart> eTagBasedValidator = + (part, partKeyInfo) -> { + String eTag = part.getETag(); + AtomicReference<String> dbPartETag = new AtomicReference<>(); + String dbPartName = null; + if (partKeyInfo != null) { + partKeyInfo.getPartKeyInfo().getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().ifPresent(kv -> dbPartETag.set(kv.getValue())); + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : + dbPartETag.get(), StringUtils.equals(eTag, dbPartETag.get()) || StringUtils.equals(eTag, dbPartName)); + }; + private BiFunction<OzoneManagerProtocolProtos.Part, PartKeyInfo, MultipartCommitRequestPart> partNameBasedValidator = + (part, partKeyInfo) -> { + String partName = part.getPartName(); + String dbPartName = null; + if (partKeyInfo != null) { + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(partName, partKeyInfo == null ? null : + dbPartName, StringUtils.equals(partName, dbPartName)); + }; + public S3MultipartUploadCompleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { super(omRequest, bucketLayout); @@ -253,7 +281,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setVolume(requestedVolume) .setBucket(requestedBucket) .setKey(keyName) - .setHash(omKeyInfo.getMetadata().get("ETag"))); + .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); long volumeId = omMetadataManager.getVolumeId(volumeName); long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); @@ -393,7 +421,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) .setAcls(dbOpenKeyInfo.getAcls()) - .addMetadata("ETag", + .addMetadata(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); // Check if db entry has ObjectID.
This check is required because // it is possible that between multipart key uploads and complete, @@ -423,7 +451,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, omKeyInfo.setModificationTime(keyArgs.getModificationTime()); omKeyInfo.setDataSize(dataSize); omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig()); - omKeyInfo.getMetadata().put("ETag", + omKeyInfo.getMetadata().put(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); } omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -495,24 +523,19 @@ private long getMultipartDataSize(String requestedVolume, OzoneManager ozoneManager) throws OMException { long dataSize = 0; int currentPartCount = 0; + boolean eTagBasedValidationAvailable = partsList.stream().allMatch(OzoneManagerProtocolProtos.Part::hasETag); // Now do actual logic, and check for any Invalid part during this. for (OzoneManagerProtocolProtos.Part part : partsList) { currentPartCount++; int partNumber = part.getPartNumber(); - String partName = part.getPartName(); - PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber); - - String dbPartName = null; - if (partKeyInfo != null) { - dbPartName = partKeyInfo.getPartName(); - } - if (!StringUtils.equals(partName, dbPartName)) { - String omPartName = partKeyInfo == null ? null : dbPartName; + MultipartCommitRequestPart requestPart = eTagBasedValidationAvailable ? + eTagBasedValidator.apply(part, partKeyInfo) : partNameBasedValidator.apply(part, partKeyInfo); + if (!requestPart.isValid()) { throw new OMException( failureMessage(requestedVolume, requestedBucket, keyName) + - ". Provided Part info is { " + partName + ", " + partNumber + - "}, whereas OM has partName " + omPartName, + ". Provided Part info is { " + requestPart.getRequestPartId() + ", " + partNumber + + "}, whereas OM has eTag " + requestPart.getOmPartId(), OMException.ResultCodes.INVALID_PART); } @@ -645,11 +668,41 @@ private String multipartUploadedKeyHash( OmMultipartKeyInfo.PartKeyInfoMap partsList) { StringBuffer keysConcatenated = new StringBuffer(); for (PartKeyInfo partKeyInfo: partsList) { - keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo - .getPartKeyInfo().getMetadataList()).get("ETag")); + String partPropertyToComputeHash = KeyValueUtil.getFromProtobuf(partKeyInfo.getPartKeyInfo().getMetadataList()) + .get(OzoneConsts.ETAG); + if (partPropertyToComputeHash == null) { + partPropertyToComputeHash = partKeyInfo.getPartName(); + } + keysConcatenated.append(partPropertyToComputeHash); } return DigestUtils.md5Hex(keysConcatenated.toString()) + "-" + partsList.size(); } + private static class MultipartCommitRequestPart { + private String requestPartId; + + private String omPartId; + + private boolean isValid; + + MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { + this.requestPartId = requestPartId; + this.omPartId = omPartId; + this.isValid = isValid; + } + + public String getRequestPartId() { + return requestPartId; + } + + public String getOmPartId() { + return omPartId; + } + + public boolean isValid() { + return isValid; + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index ac9dd41409d..951c1a7c738 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,12 +19,19 @@ package org.apache.hadoop.ozone.om.request; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.UUID; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -1056,14 +1063,31 @@ public static OMRequest createCommitPartMPURequest(String volumeName, String bucketName, String keyName, long clientID, long size, String multipartUploadID, int partNumber) { + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + // Just set dummy size. - KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName) - .setDataSize(size) - .setMultipartNumber(partNumber) - .setMultipartUploadID(multipartUploadID) - .addAllKeyLocations(new ArrayList<>()); + KeyArgs.Builder keyArgs = KeyArgs.newBuilder().setVolumeName(volumeName) + .setKeyName(keyName) + .setBucketName(bucketName) + .setDataSize(size) + .setMultipartNumber(partNumber) + .setMultipartUploadID(multipartUploadID) + .addAllKeyLocations(new ArrayList<>()) + .addMetadata(HddsProtos.KeyValue.newBuilder() + .setKey(OzoneConsts.ETAG) + .setValue(DatatypeConverter.printHexBinary( + new DigestInputStream( + new ByteArrayInputStream( + RandomStringUtils.randomAlphanumeric((int) size) + .getBytes(StandardCharsets.UTF_8)), + eTagProvider) + .getMessageDigest().digest())) + .build()); // Just adding dummy list. As this is for UT only. 
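The commit-part helper above stamps its ETag the same way the production code does: an MD5 over the part content, hex-encoded in lowercase. A self-contained sketch of that computation follows; the class name and the explicit drain loop are illustrative additions, not code from this patch. One caveat worth stating: a DigestInputStream only digests bytes that are actually read through it, so the stream must be consumed before digest() is called.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.xml.bind.DatatypeConverter;

public final class PartETagSketch {
  // Computes the lowercase MD5 hex string used as a part ETag in these tests.
  public static String md5ETag(byte[] partContent)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    try (InputStream in =
        new DigestInputStream(new ByteArrayInputStream(partContent), md5)) {
      byte[] buf = new byte[4096];
      // Drain the stream; only bytes read through it reach the digest.
      while (in.read(buf) != -1) {
        // no-op, reading is the point
      }
    }
    return DatatypeConverter.printHexBinary(md5.digest()).toLowerCase();
  }

  public static void main(String[] args) throws Exception {
    System.out.println(
        md5ETag("part-1-content".getBytes(StandardCharsets.UTF_8)));
  }
}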
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index e0afb85f1cb..80946996aa8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -128,9 +129,14 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, List<Part> partList = new ArrayList<>(); - String partName = getPartName(volumeName, bucketName, keyName, - multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1) + String eTag = s3MultipartUploadCommitPartRequest.getOmRequest() + .getCommitMultiPartUploadRequest() + .getKeyArgs() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().get().getValue(); + partList.add(Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, bucketName, keyName, multipartUploadID, partList); @@ -219,10 +225,10 @@ public void testInvalidPartOrderError() throws Exception { String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(23).build()); partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(1).build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, bucketName, keyName, multipartUploadID, partList); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 23b543b6ec1..51963a00a1c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -287,7 +287,7 @@ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO( .setStatus(status).setSuccess(true) .setCommitMultiPartUploadResponse( OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse - .newBuilder().setPartName(volumeName)).build(); + .newBuilder().setETag(volumeName).setPartName(volumeName)).build(); return new S3MultipartUploadCommitPartResponseWithFSO(omResponse, multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index b65cfd04844..8e9c1cabe9e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -19,11 +19,13 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmTestManagers; @@ -248,6 +250,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setMultipartUploadID(omMultipartInfo.getUploadID()) .setMultipartUploadPartNumber(i) .setAcls(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, + DigestUtils.md5Hex(UUID.randomUUID().toString())) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(Collections.emptyList()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index fad99837e2b..87ef6a67df8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -27,6 +28,7 @@ import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -479,6 +481,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID() + .toString())) .build(); writeClient.commitMultipartUploadPart(commitPartKeyArgs, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java index 47b59cfcc0e..8ae48ca4f83 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java @@ -21,6 +21,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import 
javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; +import org.apache.hadoop.ozone.OzoneConsts; + import java.time.Instant; /** @@ -37,7 +39,7 @@ public class KeyMetadata { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; @XmlElement(name = "Size") diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java index 72289470c2c..af5eafc9f43 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java @@ -23,6 +23,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; + import java.util.ArrayList; import java.util.List; @@ -55,7 +57,7 @@ public static class Part { @XmlElement(name = "PartNumber") private int partNumber; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public int getPartNumber() { @@ -66,12 +68,12 @@ public void setPartNumber(int partNumber) { this.partNumber = partNumber; } - public String geteTag() { + public String getETag() { return eTag; } - public void seteTag(String eTag) { - this.eTag = eTag; + public void setETag(String eTagHash) { + this.eTag = eTagHash; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java index c636f36b175..2aa30d6b839 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java @@ -22,6 +22,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; /** * Complete Multipart Upload request response. 
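The ETag carried in this completed-upload response is not an MD5 of the assembled object. As multipartUploadedKeyHash in S3MultipartUploadCompleteRequest shows earlier in this patch, the OM hashes the concatenation of the per-part ETag strings and appends "-<part count>". A minimal sketch of that shape follows (illustrative class name; AWS S3 is generally understood to compute its aggregate over the raw digest bytes rather than the hex strings, so the format matches while exact values can differ):

import java.util.Arrays;
import java.util.List;
import org.apache.commons.codec.digest.DigestUtils;

public final class CompletedETagSketch {
  // Mirrors the shape produced by multipartUploadedKeyHash: MD5 over the
  // concatenated per-part ETag strings, suffixed with "-<number of parts>".
  public static String completedKeyETag(List<String> partETags) {
    StringBuilder concatenated = new StringBuilder();
    for (String partETag : partETags) {
      concatenated.append(partETag);
    }
    return DigestUtils.md5Hex(concatenated.toString()) + "-" + partETags.size();
  }

  public static void main(String[] args) {
    // Two example part ETags; the result ends in "-2".
    System.out.println(completedKeyETag(Arrays.asList(
        "9a0364b9e99bb480dd25e1f0284c8555",
        "81dc9bdb52d04dc20036dbd8313ed055")));
  }
}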
@@ -41,7 +42,7 @@ public class CompleteMultipartUploadResponse { @XmlElement(name = "Key") private String key; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public String getLocation() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java index 6e114c2e0c6..d1136fe9ed7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -39,7 +40,7 @@ public class CopyObjectResponse { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java index c4e65aa38ff..ab30c1f0e7c 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java @@ -25,6 +25,7 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; import java.time.Instant; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; /** @@ -39,7 +40,7 @@ public class CopyPartResult { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public CopyPartResult() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 6f0f3c48472..5e7a3b9b599 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -65,6 +65,7 @@ import org.slf4j.LoggerFactory; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; @@ -74,8 +75,6 @@ */ public abstract class EndpointBase implements Auditor { - protected static final String ETAG = "ETag"; - protected static final String ETAG_CUSTOM = "etag-custom"; @Inject diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java index fc9da14133c..8f3fad73544 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import 
org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -154,7 +155,7 @@ public static class Part { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 04c030530ce..6fc5de9879c 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -101,6 +101,7 @@ import java.util.OptionalLong; import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; +import static javax.ws.rs.core.HttpHeaders.ETAG; import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; @@ -150,7 +151,7 @@ public class ObjectEndpoint extends EndpointBase { static { E_TAG_PROVIDER = ThreadLocal.withInitial(() -> { try { - return MessageDigest.getInstance("Md5"); + return MessageDigest.getInstance(OzoneConsts.MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } @@ -808,7 +809,7 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; try { for (CompleteMultipartUploadRequest.Part part : partList) { - partsMap.put(part.getPartNumber(), part.geteTag()); + partsMap.put(part.getPartNumber(), part.getETag()); } if (LOG.isDebugEnabled()) { LOG.debug("Parts map {}", partsMap); @@ -956,6 +957,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( sourceObject, ozoneOutputStream, 0, length); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } else { @@ -965,6 +968,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } @@ -994,7 +999,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, assert keyOutputStream != null; OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = keyOutputStream.getCommitUploadPartInfo(); - String eTag = omMultipartCommitUploadPartInfo.getPartName(); + String eTag = omMultipartCommitUploadPartInfo.getETag(); if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1065,7 +1070,7 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getPartName()); + part.setETag(partInfo.getETag()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index dbc7f374a9a..12afbec31da 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -21,12 +21,11 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; @@ -110,7 +109,7 @@ public static Pair putKeyWithStream( eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) .toLowerCase(); perf.appendMetaLatencyNanos(metadataLatencyNs); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return Pair.of(eTag, writeLen); } @@ -163,11 +162,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, long startNanos = Time.monotonicNowNanos(); String eTag; S3GatewayMetrics metrics = S3GatewayMetrics.create(); - // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneDataStreamOutput is closed, so we need to save the - // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed. 
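The deleted comment above describes the old flow, where the part ETag had to be fetched from the commit info after the stream was closed. After this patch the gateway derives the ETag from the MD5 it accumulates while writing, so the header it returns is the digest of exactly the bytes it consumed. A small sketch of the end-to-end check this enables on the client side; the class and the returnedETag parameter are hypothetical, not part of the patch:

import java.nio.charset.StandardCharsets;
import org.apache.commons.codec.digest.DigestUtils;

public final class PartUploadCheckSketch {
  // 'returnedETag' stands for the ETag header of the part-upload response;
  // comparing it with a locally computed MD5 detects corruption in transit.
  // S3 ETags are often returned quoted, so quotes are stripped first.
  public static void verify(byte[] sentBytes, String returnedETag) {
    String expected = DigestUtils.md5Hex(sentBytes);
    if (!expected.equalsIgnoreCase(returnedETag.replace("\"", ""))) {
      throw new IllegalStateException(
          "part ETag mismatch: expected " + expected + " got " + returnedETag);
    }
  }

  public static void main(String[] args) {
    byte[] data = "Multipart Upload".getBytes(StandardCharsets.UTF_8);
    verify(data, DigestUtils.md5Hex(data)); // passes
  }
}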
- KeyDataStreamOutput keyDataStreamOutput = null; try { try (OzoneDataStreamOutput streamOutput = ozoneBucket .createMultipartStreamKey(key, length, partNumber, uploadID)) { @@ -176,11 +170,10 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, writeToStreamOutput(streamOutput, body, chunkSize, length); eTag = DatatypeConverter.printHexBinary( body.getMessageDigest().digest()).toLowerCase(); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); metrics.incPutKeySuccessLength(putLength); perf.appendMetaLatencyNanos(metadataLatencyNs); perf.appendSizeBytes(putLength); - keyDataStreamOutput = streamOutput.getKeyDataStreamOutput(); } } catch (OMException ex) { if (ex.getResult() == @@ -192,13 +185,7 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, ozoneBucket.getName() + "/" + key); } throw ex; - } finally { - if (keyDataStreamOutput != null) { - OmMultipartCommitUploadPartInfo commitUploadPartInfo = - keyDataStreamOutput.getCommitUploadPartInfo(); - eTag = commitUploadPartInfo.getPartName(); - } } - return Response.ok().header("ETag", eTag).build(); + return Response.ok().header(OzoneConsts.ETAG, eTag).build(); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index fad3386c61c..39ae9cc4af1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -23,6 +23,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -32,6 +34,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -53,6 +56,8 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.util.Time; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** @@ -267,7 +272,8 @@ public void close() throws IOException { byte[] bytes = new byte[position]; buffer.get(bytes); - Part part = new Part(key + size, bytes); + Part part = new Part(key + size, bytes, + getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -425,7 +431,7 @@ public OzoneOutputStream createMultipartKey(String key, long size, @Override public void close() throws IOException { Part part = new Part(key + size, - toByteArray()); + toByteArray(), getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -463,7 +469,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, for (Map.Entry part: partsMap.entrySet()) { Part recordedPart = partsList.get(part.getKey()); if (recordedPart == null || - !recordedPart.getPartName().equals(part.getValue())) { + 
!recordedPart.getETag().equals(part.getValue())) { throw new OMException(ResultCodes.INVALID_PART); } else { output.write(recordedPart.getContent()); @@ -506,13 +512,21 @@ public OzoneMultipartUploadPartListParts listParts(String key, int count = 0; int nextPartNumberMarker = 0; boolean truncated = false; + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } while (count < maxParts && partIterator.hasNext()) { Map.Entry partEntry = partIterator.next(); nextPartNumberMarker = partEntry.getKey(); if (partEntry.getKey() > partNumberMarker) { PartInfo partInfo = new PartInfo(partEntry.getKey(), partEntry.getValue().getPartName(), - Time.now(), partEntry.getValue().getContent().length); + Time.now(), partEntry.getValue().getContent().length, + DatatypeConverter.printHexBinary(eTagProvider.digest(partEntry + .getValue().getContent())).toLowerCase()); partInfoList.add(partInfo); count++; } @@ -563,9 +577,12 @@ public static class Part { private String partName; private byte[] content; - public Part(String name, byte[] data) { + private String eTag; + + public Part(String name, byte[] data, String eTag) { this.partName = name; this.content = data.clone(); + this.eTag = eTag; } public String getPartName() { @@ -575,6 +592,11 @@ public String getPartName() { public byte[] getContent() { return content.clone(); } + + public String getETag() { + return eTag; + } + } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index 7bb35682d8d..b472320b7fe 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -65,6 +66,7 @@ public synchronized void close() throws IOException { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? 
new OmMultipartCommitUploadPartInfo(partName, + getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index 0687a0fb8e2..ca3caa4ee77 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -22,6 +22,8 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -88,7 +90,8 @@ public KeyOutputStream getKeyOutputStream() { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? new OmMultipartCommitUploadPartInfo(partName, + ((KeyMetadataAware)getOutputStream()).getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java index ab87f9c98e1..cd0fbfed4e6 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java @@ -80,8 +80,8 @@ private void checkContent(CompleteMultipartUploadRequest request) { List parts = request.getPartList(); - assertEquals(part1, parts.get(0).geteTag()); - assertEquals(part2, parts.get(1).geteTag()); + assertEquals(part1, parts.get(0).getETag()); + assertEquals(part2, parts.get(1).getETag()); } private CompleteMultipartUploadRequest unmarshall( diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 35d713e4c4e..b1bc0863db4 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -79,17 +79,17 @@ public static void setUp() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 2, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 3, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index 5bbbe9f2bcc..c15b1d5cf47 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -93,9 +93,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -205,7 +205,7 @@ public void testMultipartInvalidPartError() throws Exception { Part part1 = uploadPart(key, uploadID, partNumber, content); // Change part name. - part1.seteTag("random"); + part1.setETag("random"); partsList.add(part1); content = "Multipart Upload 2"; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 7b32fa421fa..09e4723b6bb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Scanner; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -91,7 +92,11 @@ public static void setUp() throws Exception { try (OutputStream stream = bucket .createKey(EXISTING_KEY, keyContent.length, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, - ReplicationFactor.THREE), new HashMap<>())) { + ReplicationFactor.THREE), + new HashMap() {{ + put(OzoneConsts.ETAG, DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); + }} + )) { stream.write(keyContent); } @@ -327,9 +332,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -377,7 +382,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, assertNotNull(result.getETag()); assertNotNull(result.getLastModified()); Part part = new Part(); - part.seteTag(result.getETag()); + part.setETag(result.getETag()); part.setPartNumber(partNumber); return part; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index ca9a85180fa..7776ea9ed6b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -90,7 +90,7 @@ public void testPartUpload() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -112,16 +112,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 317be7f8f63..73efda94ae1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -95,7 +96,7 @@ public void testPartUpload() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -116,16 +117,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } From 3dd1b15bfebb1598ee595f54a0b9e5335ee1872d Mon Sep 17 00:00:00 2001 From: Ivan Andika Date: Thu, 22 Feb 2024 18:47:58 +0800 Subject: [PATCH 2/5] HDDS-10395. 
Fix eTag compatibility issues for MPU (#6235) (cherry picked from commit 5f6306dd1dde3c9c982ed933aa1fa8f4fa7a9301) --- .../OzoneMultipartUploadPartListParts.java | 12 ++-- .../hadoop/ozone/om/helpers/OmPartInfo.java | 24 +++++--- .../hadoop/ozone/om/KeyManagerImpl.java | 14 ++++- .../hadoop/ozone/om/TestKeyManagerUnit.java | 57 ++++++++++++++++++- .../ozone/s3/endpoint/ObjectEndpoint.java | 11 +++- 5 files changed, 98 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java index 67f8edf3140..c085720d191 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java @@ -98,13 +98,13 @@ public ReplicationConfig getReplicationConfig() { /** * Class that represents each Part information of a multipart upload part. */ - public static class PartInfo { + public static final class PartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; - private String eTag; + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; public PartInfo(int number, String name, long time, long size, String eTag) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index e908c5a025f..35d97cd4ffd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -23,12 +23,12 @@ /** * Class that defines information about each part of a multipart upload key. 
*/ -public class OmPartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; - private String eTag; +public final class OmPartInfo { + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; public OmPartInfo(int number, String name, long time, long size, String eTag) { @@ -60,8 +60,14 @@ public String getETag() { } public PartInfo getProto() { - return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) - .setModificationTime(modificationTime) - .setSize(size).setETag(eTag).build(); + PartInfo.Builder builder = PartInfo.newBuilder() + .setPartNumber(partNumber) + .setPartName(partName) + .setModificationTime(modificationTime) + .setSize(size); + if (eTag != null) { + builder.setETag(eTag); + } + return builder.build(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c7d675dddcb..c54750fee7d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.Stack; import java.util.TreeMap; @@ -47,6 +48,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -788,13 +790,19 @@ public OmMultipartUploadListParts listParts(String volumeName, if (nextPartNumberMarker > partNumberMarker) { String partName = getPartName(partKeyInfo, volumeName, bucketName, keyName); + // Before HDDS-9680, MPU part does not have eTag metadata, for + // this case, we return null. The S3G will handle this case by + // using the MPU part name as the eTag field instead. 
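The lookup immediately after this comment implements that compatibility rule. Reduced to a standalone sketch with simplified placeholder types (not code from the patch), the rule is: prefer the part's stored eTag metadata, otherwise fall back to the legacy part name.

import java.util.Collections;
import java.util.Map;
import java.util.Optional;

public final class ETagFallbackSketch {
  // 'metadata' stands for the part's key-value metadata; parts committed
  // before HDDS-9680 have no "ETag" entry, so the caller falls back to the
  // MPU part name, matching what S3G returns for old parts.
  public static String eTagOrPartName(Map<String, String> metadata,
      String partName) {
    return Optional.ofNullable(metadata.get("ETag")).orElse(partName);
  }

  public static void main(String[] args) {
    // Prints the part-name fallback, since the metadata has no ETag entry.
    System.out.println(
        eTagOrPartName(Collections.emptyMap(), "/vol/bucket/key-part-1"));
  }
}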
+ Optional<HddsProtos.KeyValue> eTag = partKeyInfo.getPartKeyInfo() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(ETAG)) + .findFirst(); OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), partKeyInfo.getPartKeyInfo().getDataSize(), - partKeyInfo.getPartKeyInfo().getMetadataList().stream() - .filter(keyValue -> keyValue.getKey().equals(ETAG)) - .findFirst().get().getValue()); + eTag.map(HddsProtos.KeyValue::getValue).orElse(null)); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 795f933b971..ea58e832a0c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -65,6 +65,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -112,7 +113,7 @@ public class TestKeyManagerUnit { public static void setup() { ExitUtils.disableSystemExit(); } - + @BeforeEach public void init() throws Exception { configuration = new OzoneConfiguration(); @@ -154,6 +155,60 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { omMultipartUploadListParts.getPartInfoList().size()); } + @Test + public void listMultipartUploadPartsWithoutEtagField() throws IOException { + // For backward compatibility reasons + final String volume = "vol1"; + final String bucket = "bucketForEtag"; + final String key = "dir/key1"; + createBucket(metadataManager, volume, bucket); + OmMultipartInfo omMultipartInfo = + initMultipartUpload(writeClient, volume, bucket, key); + + + // Commit some MPU parts without eTag field + for (int i = 1; i <= 5; i++) { + OmKeyArgs partKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .build(); + + OpenKeySession openKey = writeClient.openKey(partKeyArgs); + + OmKeyArgs commitPartKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setLocationInfoList(Collections.emptyList()) + .build(); + + writeClient.commitMultipartUploadPart(commitPartKeyArgs, openKey.getId()); + } + + + OmMultipartUploadListParts omMultipartUploadListParts = keyManager + .listParts(volume, bucket, key, omMultipartInfo.getUploadID(), + 0, 10); + Assertions.assertEquals(5, + omMultipartUploadListParts.getPartInfoList().size()); + + } + @Test public void
listMultipartUploads() throws IOException { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 6fc5de9879c..643d8551d82 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -1000,6 +1000,12 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = keyOutputStream.getCommitUploadPartInfo(); String eTag = omMultipartCommitUploadPartInfo.getETag(); + // If the OmMultipartCommitUploadPartInfo does not contain eTag, + // fall back to MPU part name for compatibility in case the (old) OM + // does not return the eTag field + if (StringUtils.isEmpty(eTag)) { + eTag = omMultipartCommitUploadPartInfo.getPartName(); + } if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1070,7 +1076,10 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getETag()); + // If the ETag field does not exist, use MPU part name for backward + // compatibility + part.setETag(StringUtils.isNotEmpty(partInfo.getETag()) ? + partInfo.getETag() : partInfo.getPartName()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); From 9a97366266b05fbfc08156d31bdd2e96198e25f9 Mon Sep 17 00:00:00 2001 From: Ivan Andika Date: Fri, 23 Feb 2024 20:35:43 +0800 Subject: [PATCH 3/5] HDDS-10403. 
CopyObject should set ETag based on the key content (#6251) (cherry picked from commit babf85c762ec159b6086145299f9a69b0a27f2ae) --- .../src/main/smoketest/s3/objectcopy.robot | 14 ++++++++++ .../ozone/s3/endpoint/ObjectEndpoint.java | 17 +++++++----- .../s3/endpoint/ObjectEndpointStreaming.java | 9 ++++--- .../hadoop/ozone/client/OzoneBucketStub.java | 2 +- .../ozone/s3/endpoint/TestObjectPut.java | 26 +++++++++++++++++++ 5 files changed, 57 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot index 21764d65c44..af7571d35b8 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot @@ -37,15 +37,26 @@ Create Dest Bucket Copy Object Happy Scenario Run Keyword if '${DESTBUCKET}' == 'generated1' Create Dest Bucket Execute date > /tmp/copyfile + ${file_checksum} = Execute md5sum /tmp/copyfile | awk '{print $1}' + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/key=value/f1 --body /tmp/copyfile + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 #copying again will not throw error ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 @@ -56,8 +67,11 @@ Copy Object Where Bucket is not available Should contain ${result} NoSuchBucket Copy Object Where both source and dest are same with change to storageclass + ${file_checksum} = Execute md5sum /tmp/copyfile | awk '{print $1}' ${result} = Execute AWSS3APICli copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${DESTBUCKET}/${PREFIX}/copyobject/key=value/f1 Should contain ${result} ETag + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" Copy Object Where Key not available ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/nonnonexistentkey 255 diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 643d8551d82..1d3850b12ac 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -1119,13 +1119,14 @@ 
void copy(OzoneVolume volume, InputStream src, long srcKeyLen, PerformanceStringBuilder perf, long startNanos) throws IOException { long copyLength; + src = new DigestInputStream(src, E_TAG_PROVIDER.get()); if (datastreamEnabled && !(replication != null && replication.getReplicationType() == EC) && srcKeyLen > datastreamMinLength) { perf.appendStreamMode(); copyLength = ObjectEndpointStreaming .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen, - chunkSize, replication, metadata, src, perf, startNanos); + chunkSize, replication, metadata, (DigestInputStream) src, perf, startNanos); } else { try (OzoneOutputStream dest = getClientProtocol() .createKey(volume.getName(), destBucket, destKey, srcKeyLen, @@ -1134,6 +1135,10 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest); + String eTag = DatatypeConverter.printHexBinary( + ((DigestInputStream) src).getMessageDigest().digest()) + .toLowerCase(); + dest.getMetadata().put(ETAG, eTag); } } getMetrics().incCopyObjectSuccessLength(copyLength); @@ -1152,8 +1157,9 @@ private CopyObjectResponse copyObject(OzoneVolume volume, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); try { + OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( + volume.getName(), sourceBucket, sourceKey); // Check whether we are trying to copy to itself. - if (sourceBucket.equals(destBucket) && sourceKey .equals(destkey)) { // When copying to same storage type when storage type is provided, @@ -1172,15 +1178,12 @@ private CopyObjectResponse copyObject(OzoneVolume volume, // still does not support this just returning dummy response // for now CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(sourceKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(Instant.ofEpochMilli( Time.now())); return copyObjectResponse; } } - - OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( - volume.getName(), sourceBucket, sourceKey); long sourceKeyLen = sourceKeyDetails.getDataSize(); try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(), sourceBucket, sourceKey)) { getMetrics().updateCopyKeyMetadataStats(startNanos); copy(volume, src, sourceKeyLen, destkey, destBucket, replicationConfig, sourceKeyDetails.getMetadata(), perf, startNanos); } @@ -1195,7 +1198,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, getMetrics().updateCopyObjectSuccessStats(startNanos); CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(destKeyDetails.getModificationTime()); return copyObjectResponse; } catch (OMException ex) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index 12afbec31da..8be231f6d92 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -122,16 +122,19 @@ public static long copyKeyWithStream( int bufferSize, ReplicationConfig replicationConfig, Map keyMetadata, - InputStream body, PerformanceStringBuilder perf, long startNanos) + DigestInputStream body, PerformanceStringBuilder
perf, long startNanos) throws IOException { - long writeLen = 0; + long writeLen; S3GatewayMetrics metrics = S3GatewayMetrics.create(); try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath, length, replicationConfig, keyMetadata)) { long metadataLatencyNs = metrics.updateCopyKeyMetadataStats(startNanos); - perf.appendMetaLatencyNanos(metadataLatencyNs); writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length); + String eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) + .toLowerCase(); + perf.appendMetaLatencyNanos(metadataLatencyNs); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return writeLen; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 39ae9cc4af1..0cbe0781c4b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -209,7 +209,7 @@ public OzoneDataStreamOutput createStreamKey(String key, long size, Map keyMetadata) throws IOException { ByteBufferStreamOutput byteBufferStreamOutput = - new ByteBufferStreamOutput() { + new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) { private final ByteBuffer buffer = ByteBuffer.allocate((int) size); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index f1da08013ae..89fe5b939dc 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -26,9 +26,11 @@ import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; @@ -52,6 +54,7 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -107,9 +110,12 @@ public void testPutObject() throws IOException, OS3Exception { .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } @Test @@ -135,9 +141,12 @@ public void testPutObjectWithECReplicationConfig() .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = 
clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } @Test @@ -207,9 +216,12 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { clientStub.getObjectStore().getS3Bucket(bucketName) .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals("1234567890abcde", keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } @Test @@ -229,10 +241,14 @@ public void testCopyObject() throws IOException, OS3Exception { .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + String sourceETag = keyDetails.getMetadata().get(OzoneConsts.ETAG); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( @@ -246,9 +262,19 @@ public void testCopyObject() throws IOException, OS3Exception { .readKey(destkey); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(destBucket).getKey(destkey); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + // Source key eTag should remain unchanged and the dest key should have + // the same ETag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); // source and dest same OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
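Note on the mechanism behind HDDS-10403 above: wrapping the source stream in a java.security.DigestInputStream updates its MessageDigest as a side effect of every read, so the copy path has the destination key's ETag ready the moment the copy loop drains the source, with no second pass over the data. A minimal sketch of the pattern under the same assumptions (DigestCopySketch and copyAndDigest are illustrative names, not gateway code):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.xml.bind.DatatypeConverter;

public final class DigestCopySketch {

  // Copies src to dest and returns the lowercase hex MD5 of the copied bytes.
  static String copyAndDigest(InputStream src, OutputStream dest)
      throws IOException, NoSuchAlgorithmException {
    DigestInputStream digesting =
        new DigestInputStream(src, MessageDigest.getInstance("MD5"));
    byte[] buf = new byte[8192];
    for (int n = digesting.read(buf); n != -1; n = digesting.read(buf)) {
      dest.write(buf, 0, n);
    }
    // digest() returns the accumulated hash and resets the MessageDigest.
    return DatatypeConverter.printHexBinary(
        digesting.getMessageDigest().digest()).toLowerCase();
  }

  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream copy = new ByteArrayOutputStream();
    String eTag = copyAndDigest(
        new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8)), copy);
    System.out.println(eTag); // 5d41402abc4b2a76b9719d911017c592
  }
}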
From ca90f5ff1aa5532f53bf84e75055ef40e4ee0f26 Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Fri, 15 Mar 2024 11:43:10 +0800 Subject: [PATCH 4/5] HDDS-10521. ETag field should not be returned during GetObject if the key does not contain ETag field (#6377) (cherry picked from commit 3d193fc06ecdf86ea39a6f65790a29c1b60af959) --- .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1d3850b12ac..dbdea9d9fdc 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -483,9 +483,12 @@ public Response get( responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal); } responseBuilder - .header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG))) .header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT); + if (keyDetails.getMetadata().get(ETAG) != null) { + responseBuilder.header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG))); + } + // if multiple query parameters have the same name, // only the first parameter will be recognized // eg: @@ -591,9 +594,16 @@ public Response head( } ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK) - .header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG))) .header("Content-Length", key.getDataSize()) .header("Content-Type", "binary/octet-stream"); + + if (key.getMetadata().get(ETAG) != null) { + // Should not return ETag header if the ETag is not set + // doing so will result in "null" string being returned instead + // which breaks some AWS SDK implementations + response.header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG))); + } + addLastModifiedDate(response, key); addCustomMetadataHeaders(response, key); getMetrics().updateHeadKeySuccessStats(startNanos);
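Note on why the next patch is needed: E_TAG_PROVIDER hands out one MessageDigest per thread, and a MessageDigest only clears its internal state when digest() completes or reset() is called. If a request fails after bytes have been read through the DigestInputStream but before digest() runs, the leftover update() state would be prepended to the next request served by the same thread, producing a wrong ETag. A minimal sketch of the failure mode and the fix; the class and field names here are illustrative, only the MessageDigest/ThreadLocal behavior is the point:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.xml.bind.DatatypeConverter;

public final class ThreadLocalDigestSketch {

  // One MessageDigest per thread, mirroring the gateway's E_TAG_PROVIDER.
  private static final ThreadLocal<MessageDigest> MD5 =
      ThreadLocal.withInitial(ThreadLocalDigestSketch::newMd5);

  private static MessageDigest newMd5() {
    try {
      return MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      throw new IllegalStateException(e);
    }
  }

  public static void main(String[] args) {
    MessageDigest md = MD5.get();
    md.update("request aborted mid-stream".getBytes(StandardCharsets.UTF_8));
    // The "request" above failed before digest() ran. Without this reset, the
    // bytes already consumed would corrupt the next digest on this thread.
    md.reset();
    md.update("hello".getBytes(StandardCharsets.UTF_8));
    // Prints 5d41402abc4b2a76b9719d911017c592, showing the state was cleared.
    System.out.println(
        DatatypeConverter.printHexBinary(md.digest()).toLowerCase());
  }
}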
From fc28224ee8d1451871817a0ff04e4d3535b8eca0 Mon Sep 17 00:00:00 2001 From: Ivan Andika Date: Wed, 27 Mar 2024 01:38:37 +0800 Subject: [PATCH 5/5] HDDS-10587. Reset ETag's thread-local MessageDigest instance on exception (#6435) (cherry picked from commit c6c611fa60592dc82e8191483b952a3bc3303f7a) --- hadoop-ozone/s3gateway/pom.xml | 5 ++ .../ozone/s3/endpoint/ObjectEndpoint.java | 66 ++++++++++----- .../ozone/s3/endpoint/TestObjectPut.java | 82 ++++++++++++++++++- .../ozone/s3/endpoint/TestPartUpload.java | 58 +++++++++++++ 4 files changed, 188 insertions(+), 23 deletions(-) diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index c308b42407e..ed3c50fdd9c 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -173,6 +173,11 @@ <groupId>org.apache.commons</groupId> <artifactId>commons-lang3</artifactId> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-inline</artifactId> + <scope>test</scope> + </dependency> diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index dbdea9d9fdc..d2fd56bac9f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -217,13 +217,14 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, - InputStream body) throws IOException, OS3Exception { + final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; PerformanceStringBuilder perf = new PerformanceStringBuilder(); String copyHeader = null, storageType = null; + DigestInputStream digestInputStream = null; try { OzoneVolume volume = getVolume(); if (uploadID != null && !uploadID.equals("")) { @@ -297,11 +298,11 @@ public Response put( if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong(amzDecodedLength); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } long putLength; @@ -310,7 +311,7 @@ public Response put( perf.appendStreamMode(); Pair keyWriteResult = ObjectEndpointStreaming .put(bucket, keyPath, length, replicationConfig, chunkSize, - customMetadata, (DigestInputStream) body, perf); + customMetadata, digestInputStream, perf); eTag = keyWriteResult.getKey(); putLength = keyWriteResult.getValue(); } else { @@ -320,9 +321,9 @@ public Response put( long metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - putLength = IOUtils.copyLarge(body, output); + putLength = IOUtils.copyLarge(digestInputStream, output); eTag = DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase(); output.getMetadata().put(ETAG, eTag); } @@ -367,6 +368,11 @@ public Response put( } throw ex; } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } if (auditSuccess) { long opLatencyNs = getMetrics().updateCreateKeySuccessStats(startNanos);
perf.appendOpLatencyNanos(opLatencyNs); @@ -879,20 +885,21 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) private Response createMultipartKey(OzoneVolume volume, String bucket, String key, long length, int partNumber, String uploadID, - InputStream body, PerformanceStringBuilder perf) + final InputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String copyHeader = null; + DigestInputStream digestInputStream = null; try { if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong( headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); @@ -912,7 +919,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, perf.appendStreamMode(); return ObjectEndpointStreaming .createMultipartKey(ozoneBucket, key, length, partNumber, - uploadID, chunkSize, (DigestInputStream) body, perf); + uploadID, chunkSize, digestInputStream, perf); } // OmMultipartCommitUploadPartInfo can only be gotten after the // OzoneOutputStream is closed, so we need to save the KeyOutputStream @@ -993,10 +1000,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); - putLength = IOUtils.copyLarge(body, ozoneOutputStream); + putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream); ((KeyMetadataAware)ozoneOutputStream.getOutputStream()) .getMetadata().put(ETAG, DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); @@ -1042,6 +1049,12 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, throw os3Exception; } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } } } @@ -1122,21 +1135,20 @@ public void setContext(ContainerRequestContext context) { } @SuppressWarnings("checkstyle:ParameterNumber") - void copy(OzoneVolume volume, InputStream src, long srcKeyLen, + void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, String destKey, String destBucket, ReplicationConfig replication, Map metadata, PerformanceStringBuilder perf, long startNanos) throws IOException { long copyLength; - src = new DigestInputStream(src, E_TAG_PROVIDER.get()); if (datastreamEnabled && !(replication != null && replication.getReplicationType() == EC) && srcKeyLen > datastreamMinLength) { perf.appendStreamMode(); copyLength = ObjectEndpointStreaming .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen, - chunkSize, replication, metadata, (DigestInputStream) src, perf, startNanos); + chunkSize, replication, metadata, src, perf, startNanos); } else { try (OzoneOutputStream dest = 
getClientProtocol() .createKey(volume.getName(), destBucket, destKey, srcKeyLen, @@ -1145,9 +1157,7 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest); - String eTag = DatatypeConverter.printHexBinary( - ((DigestInputStream) src).getMessageDigest().digest()) - .toLowerCase(); + String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); dest.getMetadata().put(ETAG, eTag); } } @@ -1166,6 +1176,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); + DigestInputStream sourceDigestInputStream = null; try { OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( volume.getName(), sourceBucket, sourceKey); @@ -1195,11 +1206,11 @@ private CopyObjectResponse copyObject(OzoneVolume volume, } } long sourceKeyLen = sourceKeyDetails.getDataSize(); - try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(), sourceBucket, sourceKey)) { getMetrics().updateCopyKeyMetadataStats(startNanos); - copy(volume, src, sourceKeyLen, destkey, destBucket, replicationConfig, + sourceDigestInputStream = new DigestInputStream(src, getMessageDigestInstance()); + copy(volume, sourceDigestInputStream, sourceKeyLen, destkey, destBucket, replicationConfig, sourceKeyDetails.getMetadata(), perf, startNanos); } @@ -1221,6 +1232,12 @@ private CopyObjectResponse copyObject(OzoneVolume volume, destBucket + "/" + destkey, ex); } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (sourceDigestInputStream != null) { + sourceDigestInputStream.getMessageDigest().reset(); + } } } @@ -1321,4 +1338,9 @@ private String wrapInQuotes(String value) { return "\"" + value + "\""; } + @VisibleForTesting + public MessageDigest getMessageDigestInstance() { + return E_TAG_PROVIDER.get(); + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 89fe5b939dc..f71b09958ad 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -23,6 +23,8 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; +import java.security.MessageDigest; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; @@ -47,6 +49,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; +import org.mockito.MockedStatic; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; @@ -57,10 +60,15 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; /** @@ -86,7 +94,7 @@ public void setup() throws IOException { clientStub.getObjectStore().createS3Bucket(destBucket); // Create PutObject and setClient to OzoneClientStub - objectEndpoint = new ObjectEndpoint(); + objectEndpoint = spy(new ObjectEndpoint()); objectEndpoint.setClient(clientStub); objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); } @@ -224,6 +232,31 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } + @Test + public void testPutObjectMessageDigestResetDuringException() throws OS3Exception { + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // For example, EOFException during put-object due to client cancelling the operation before it completes + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + + HttpHeaders headers = mock(HttpHeaders.class); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.setHeaders(headers); + try { + objectEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + @Test public void testCopyObject() throws IOException, OS3Exception { // Put object in to source bucket @@ -312,6 +345,53 @@ public void testCopyObject() throws IOException, OS3Exception { assertTrue(e.getCode().contains("NoSuchBucket")); } + @Test + public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception { + // Put object in to source bucket + HttpHeaders headers = mock(HttpHeaders.class); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.setHeaders(headers); + keyName = "sourceKey"; + + Response response = objectEndpoint.put(bucketName, keyName, + CONTENT.length(), 1, null, body); + + OzoneInputStream ozoneInputStream = clientStub.getObjectStore() + .getS3Bucket(bucketName) + .readKey(keyName); + + String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + // Add copy header, and then call put + when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( + bucketName + "/" + urlEncode(keyName)); + + try { + objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, + null, body); 
+ fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + @Test public void testInvalidStorageType() throws IOException { HttpHeaders headers = Mockito.mock(HttpHeaders.class); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 7776ea9ed6b..364a601da1b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; @@ -29,12 +30,16 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.mockito.Mockito; +import org.mockito.MockedStatic; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.MessageDigest; import java.util.UUID; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; @@ -45,6 +50,12 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; /** @@ -196,6 +207,53 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { assertContentLength(uploadID, keyName, content.length()); } + @Test + public void testPartUploadMessageDigestResetDuringException() throws IOException, OS3Exception { + OzoneClient clientStub = new OzoneClientStub(); + clientStub.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); + + + HttpHeaders headers = mock(HttpHeaders.class); + when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( + "STANDARD"); + + ObjectEndpoint objectEndpoint = spy(new ObjectEndpoint()); + + objectEndpoint.setHeaders(headers); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + + Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the part upload request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + String
content = "Multipart Upload"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + try { + objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + private void assertContentLength(String uploadID, String key, long contentLength) throws IOException { OzoneMultipartUploadPartListParts parts =