diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java index 3cfab0f94b8..f0dd81c3bc3 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java @@ -83,7 +83,7 @@ public class TestOzoneECClient { private String volumeName = UUID.randomUUID().toString(); private String bucketName = UUID.randomUUID().toString(); private byte[][] inputChunks = new byte[dataBlocks][chunkSize]; - private final XceiverClientFactory factoryStub = + private final MockXceiverClientFactory factoryStub = new MockXceiverClientFactory(); private OzoneConfiguration conf = createConfiguration(); private MultiNodePipelineBlockAllocator allocator = @@ -148,7 +148,7 @@ public void testPutECKeyAndCheckDNStoredData() throws IOException { OzoneKey key = bucket.getKey(keyName); Assertions.assertEquals(keyName, key.getName()); Map storages = - ((MockXceiverClientFactory) factoryStub).getStorages(); + factoryStub.getStorages(); DatanodeDetails[] dnDetails = storages.keySet().toArray(new DatanodeDetails[storages.size()]); Arrays.sort(dnDetails); @@ -177,7 +177,7 @@ public void testPutECKeyAndCheckParityData() throws IOException { OzoneKey key = bucket.getKey(keyName); Assertions.assertEquals(keyName, key.getName()); Map storages = - ((MockXceiverClientFactory) factoryStub).getStorages(); + factoryStub.getStorages(); DatanodeDetails[] dnDetails = storages.keySet().toArray(new DatanodeDetails[storages.size()]); Arrays.sort(dnDetails); @@ -203,7 +203,7 @@ public void testPutECKeyAndReadContent() throws IOException { byte[] fileContent = new byte[chunkSize]; for (int i = 0; i < dataBlocks; i++) { Assertions.assertEquals(inputChunks[i].length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(inputChunks[i], fileContent)); + 
Assertions.assertArrayEquals(inputChunks[i], fileContent); } // A further read should give EOF Assertions.assertEquals(-1, is.read(fileContent)); @@ -325,7 +325,7 @@ public void testSmallerThanChunkSize() throws IOException { .getKeyLocationListList().get(0); Map storages = - ((MockXceiverClientFactory) factoryStub).getStorages(); + factoryStub.getStorages(); OzoneManagerProtocolProtos.KeyLocation keyLocations = blockList.getKeyLocations(0); @@ -380,11 +380,11 @@ public void testPutBlockHasBlockGroupLen() throws IOException { byte[] fileContent = new byte[chunkSize]; for (int i = 0; i < dataBlocks; i++) { Assertions.assertEquals(inputChunks[i].length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(inputChunks[i], fileContent)); + Assertions.assertArrayEquals(inputChunks[i], fileContent); } Map storages = - ((MockXceiverClientFactory) factoryStub).getStorages(); + factoryStub.getStorages(); OzoneManagerProtocolProtos.KeyLocationList blockList = transportStub.getKeys().get(volumeName).get(bucketName).get(keyName).
getKeyLocationListList().get(0); @@ -561,11 +561,10 @@ public void testPartialStripeWithPartialLastChunk() byte[] fileContent = new byte[chunkSize]; for (int i = 0; i < 2; i++) { Assertions.assertEquals(inputChunks[i].length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(inputChunks[i], fileContent)); + Assertions.assertArrayEquals(inputChunks[i], fileContent); } Assertions.assertEquals(lastChunk.length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(lastChunk, - Arrays.copyOf(fileContent, lastChunk.length))); + Assertions.assertArrayEquals(lastChunk, Arrays.copyOf(fileContent, lastChunk.length)); // A further read should give EOF Assertions.assertEquals(-1, is.read(fileContent)); } @@ -602,8 +601,7 @@ public void test10D4PConfigWithPartialStripe() try (OzoneInputStream is = bucket.readKey(keyName)) { byte[] fileContent = new byte[chunkSize]; Assertions.assertEquals(inSize, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(partialChunk, - Arrays.copyOf(fileContent, inSize))); + Assertions.assertArrayEquals(partialChunk, Arrays.copyOf(fileContent, inSize)); } } @@ -648,8 +646,7 @@ public void testStripeWriteRetriesOn2Failures() throws Exception { nodesIndexesToMarkFailure); // It should have used 3rd block group also. So, total initialized nodes // count should be clusterSize. - Assertions.assertTrue(((MockXceiverClientFactory) factoryStub) - .getStorages().size() == clusterSize); + Assertions.assertEquals(clusterSize, factoryStub.getStorages().size()); } @Test @@ -669,8 +666,7 @@ public void testStripeWriteRetriesOn3Failures() throws Exception { nodesIndexesToMarkFailure); // It should have used 3rd block group also. So, total initialized nodes // count should be clusterSize. 
- Assertions.assertTrue(((MockXceiverClientFactory) factoryStub) - .getStorages().size() == clusterSize); + Assertions.assertEquals(clusterSize, factoryStub.getStorages().size()); } // The mocked impl throws IllegalStateException when there are not enough @@ -742,8 +738,7 @@ public void testStripeWriteRetriesOnFailures(OzoneConfiguration con, out.write(inputChunks[i]); } waitForFlushingThreadToFinish((ECKeyOutputStream) out.getOutputStream()); - Assertions.assertTrue( - ((MockXceiverClientFactory) factoryStub).getStorages().size() == 5); + Assertions.assertEquals(5, factoryStub.getStorages().size()); List failedDNs = new ArrayList<>(); List dns = blkAllocator.getClusterDns(); @@ -753,7 +748,7 @@ public void testStripeWriteRetriesOnFailures(OzoneConfiguration con, } // First let's set storage as bad - ((MockXceiverClientFactory) factoryStub).setFailedStorages(failedDNs); + factoryStub.setFailedStorages(failedDNs); // Writer should be able to write by using 3rd block group. for (int i = 0; i < numChunksToWriteAfterFailure; i++) { @@ -770,15 +765,11 @@ public void testStripeWriteRetriesOnFailures(OzoneConfiguration con, byte[] fileContent = new byte[chunkSize]; for (int i = 0; i < dataBlocks; i++) { Assertions.assertEquals(inputChunks[i].length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(inputChunks[i], fileContent), - "Expected: " + new String(inputChunks[i], - UTF_8) + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i], fileContent); } for (int i = 0; i < numChunksToWriteAfterFailure; i++) { Assertions.assertEquals(inputChunks[i].length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(inputChunks[i], fileContent), - "Expected: " + new String(inputChunks[i], - UTF_8) + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i], fileContent); } } } @@ -808,7 +799,7 @@ public void testNodeFailuresWhileWriting(int[] nodesIndexesToMarkFailure, } // 
First let's set storage as bad - ((MockXceiverClientFactory) factoryStub).setFailedStorages(failedDNs); + factoryStub.setFailedStorages(failedDNs); for (int i = 0; i < numChunksToWriteAfterFailure; i++) { out.write(inputChunks[i % dataBlocks]); @@ -825,17 +816,12 @@ public void testNodeFailuresWhileWriting(int[] nodesIndexesToMarkFailure, byte[] fileContent = new byte[chunkSize]; for (int i = 0; i < dataBlocks; i++) { Assertions.assertEquals(inputChunks[i].length, is.read(fileContent)); - Assertions.assertTrue(Arrays.equals(inputChunks[i], fileContent), - "Expected: " + new String(inputChunks[i], - UTF_8) + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i], fileContent); } for (int i = 0; i < numChunksToWriteAfterFailure; i++) { Assertions.assertEquals(inputChunks[i % dataBlocks].length, is.read(fileContent)); - Assertions.assertTrue( - Arrays.equals(inputChunks[i % dataBlocks], fileContent), - "Expected: " + new String(inputChunks[i % dataBlocks], - UTF_8) + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i % dataBlocks], fileContent); } } } @@ -888,14 +874,14 @@ private void testExcludeFailedDN(IntStream failedDNIndex, List closedDNs = closedDNIndex .mapToObj(i -> DatanodeDetails.getFromProtoBuf(dns.get(i))) .collect(Collectors.toList()); - ((MockXceiverClientFactory) factoryStub).mockStorageFailure(closedDNs, + factoryStub.mockStorageFailure(closedDNs, new ContainerNotOpenException("Mocked")); // Then let's mark failed datanodes List failedDNs = failedDNIndex .mapToObj(i -> DatanodeDetails.getFromProtoBuf(dns.get(i))) .collect(Collectors.toList()); - ((MockXceiverClientFactory) factoryStub).setFailedStorages(failedDNs); + factoryStub.setFailedStorages(failedDNs); for (int i = 0; i < dataBlocks; i++) { out.write(inputChunks[i % dataBlocks]); @@ -955,7 +941,7 @@ public void testLargeWriteOfMultipleStripesWithStripeFailure() } // First let's set storage as bad - 
((MockXceiverClientFactory) factoryStub).setFailedStorages(failedDNs); + factoryStub.setFailedStorages(failedDNs); for (int i = 0; i < numChunksToWriteAfterFailure; i++) { out.write(inputChunks[i % dataBlocks]); @@ -974,18 +960,12 @@ public void testLargeWriteOfMultipleStripesWithStripeFailure() for (int i = 0; i < dataBlocks * numFullStripesBeforeFailure; i++) { Assertions.assertEquals(inputChunks[i % dataBlocks].length, is.read(fileContent)); - Assertions.assertTrue( - Arrays.equals(inputChunks[i % dataBlocks], fileContent), - "Expected: " + new String(inputChunks[i % dataBlocks], UTF_8) - + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i % dataBlocks], fileContent); } for (int i = 0; i < numChunksToWriteAfterFailure; i++) { Assertions.assertEquals(inputChunks[i % dataBlocks].length, is.read(fileContent)); - Assertions.assertTrue( - Arrays.equals(inputChunks[i % dataBlocks], fileContent), - "Expected: " + new String(inputChunks[i % dataBlocks], - UTF_8) + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i % dataBlocks], fileContent); } } } @@ -1033,7 +1013,7 @@ public void testPartialStripeWithPartialChunkRetry() } // First let's set storage as bad - ((MockXceiverClientFactory) factoryStub).setFailedStorages(failedDNs); + factoryStub.setFailedStorages(failedDNs); } @@ -1042,16 +1022,13 @@ public void testPartialStripeWithPartialChunkRetry() for (int i = 0; i < numFullChunks; i++) { Assertions.assertEquals(inputChunks[i % dataBlocks].length, is.read(fileContent)); - Assertions.assertTrue( - Arrays.equals(inputChunks[i % dataBlocks], fileContent), - "Expected: " + new String(inputChunks[i % dataBlocks], - UTF_8) + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i % dataBlocks], fileContent); } byte[] partialChunkToRead = new byte[partialChunkSize]; Assertions .assertEquals(partialChunkToRead.length, 
is.read(partialChunkToRead)); - Assertions.assertTrue(Arrays.equals(partialChunk, partialChunkToRead)); + Assertions.assertArrayEquals(partialChunk, partialChunkToRead); Assertions.assertEquals(-1, is.read(partialChunkToRead)); } @@ -1126,7 +1103,7 @@ public void testDiscardPreAllocatedBlocksPreventRetryExceeds() .getFromProtoBuf(dns.get(nodesIndexesToMarkFailure[j]))); } // First let's set storage as bad - ((MockXceiverClientFactory) factoryStub).setFailedStorages(failedDNs); + factoryStub.setFailedStorages(failedDNs); // Writes that will retry due to failed DNs try { @@ -1151,9 +1128,7 @@ public void testDiscardPreAllocatedBlocksPreventRetryExceeds() for (int i = 0; i < dataBlocks * numStripesTotal; i++) { Assertions.assertEquals(inputChunks[i % dataBlocks].length, is.read(fileContent)); - Assertions.assertArrayEquals(inputChunks[i % dataBlocks], fileContent, - "Expected: " + new String(inputChunks[i % dataBlocks], UTF_8) - + " \n " + "Actual: " + new String(fileContent, UTF_8)); + Assertions.assertArrayEquals(inputChunks[i % dataBlocks], fileContent); } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 554d8104f02..01844acbb6a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -70,7 +70,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static 
org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.params.provider.Arguments.arguments; @@ -282,8 +282,7 @@ public void testListBuckets() throws Exception { for (OmBucketInfo omBucketInfo : omBucketInfoList) { assertTrue(omBucketInfo.getBucketName().startsWith( prefixBucketNameWithOzoneOwner)); - assertFalse(omBucketInfo.getBucketName().equals( - prefixBucketNameWithOzoneOwner + 10)); + assertNotEquals(prefixBucketNameWithOzoneOwner + 10, omBucketInfo.getBucketName()); } @@ -426,8 +425,7 @@ public void testListKeys() throws Exception { for (OmKeyInfo omKeyInfo : omKeyInfoList) { assertTrue(omKeyInfo.getKeyName().startsWith( prefixKeyA)); - assertFalse(omKeyInfo.getBucketName().equals( - prefixKeyA + 38)); + assertNotEquals(prefixKeyA + 38, omKeyInfo.getBucketName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 4704622dd52..6659ef852f2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -348,17 +348,17 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { S3SecretCache cache = secretManager.cache(); // Check if all the three secrets are cached. - Assertions.assertTrue(cache.get(userPrincipalId1) != null); - Assertions.assertTrue(cache.get(userPrincipalId2) != null); - Assertions.assertTrue(cache.get(userPrincipalId3) != null); + Assertions.assertNotNull(cache.get(userPrincipalId1)); + Assertions.assertNotNull(cache.get(userPrincipalId2)); + Assertions.assertNotNull(cache.get(userPrincipalId3)); // Flush the current buffer. doubleBuffer.flushCurrentBuffer(); // Check if all the three secrets are cleared from the cache. 
- Assertions.assertTrue(cache.get(userPrincipalId3) == null); - Assertions.assertTrue(cache.get(userPrincipalId2) == null); - Assertions.assertTrue(cache.get(userPrincipalId1) == null); + Assertions.assertNull(cache.get(userPrincipalId3)); + Assertions.assertNull(cache.get(userPrincipalId2)); + Assertions.assertNull(cache.get(userPrincipalId1)); } finally { // cleanup metrics OzoneManagerDoubleBufferMetrics metrics = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java index ca8bc4f8daa..7c380fa7a4e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java @@ -39,7 +39,7 @@ public void testContainerKeyPrefixCodec() throws IOException { Codec codec = ContainerKeyPrefixCodec.get(); byte[] persistedFormat = codec.toPersistedFormat(containerKeyPrefix); - Assertions.assertTrue(persistedFormat != null); + Assertions.assertNotNull(persistedFormat); ContainerKeyPrefix fromPersistedFormat = codec.fromPersistedFormat(persistedFormat); Assertions.assertEquals(containerKeyPrefix, fromPersistedFormat); @@ -50,7 +50,7 @@ public void testIntegerCodec() throws IOException { Integer i = 1000; Codec codec = IntegerCodec.get(); byte[] persistedFormat = codec.toPersistedFormat(i); - Assertions.assertTrue(persistedFormat != null); + Assertions.assertNotNull(persistedFormat); Integer fromPersistedFormat = codec.fromPersistedFormat(persistedFormat); Assertions.assertEquals(i, fromPersistedFormat); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java index b1dfed99298..470403540c2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java @@ -253,14 +253,14 @@ public void testGetBlocksPendingDeletionPrevKeyParam() throws Exception { containerStateBlockInfoListMap = (Map>) blocksPendingDeletion.getEntity(); - Assertions.assertTrue(containerStateBlockInfoListMap.size() == 0); + Assertions.assertEquals(0, containerStateBlockInfoListMap.size()); blocksPendingDeletion = blocksEndPoint.getBlocksPendingDeletion(1, 4); containerStateBlockInfoListMap = (Map>) blocksPendingDeletion.getEntity(); - Assertions.assertTrue(containerStateBlockInfoListMap.size() == 0); + Assertions.assertEquals(0, containerStateBlockInfoListMap.size()); } protected ContainerWithPipeline getTestContainer( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java index 00732561a51..413753cf16c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java @@ -113,7 +113,7 @@ public void testNoDisabledFeatures() { List allDisabledFeatures = (List) disabledFeatures.getEntity(); Assertions.assertNotNull(allDisabledFeatures); - Assertions.assertTrue(allDisabledFeatures.size() == 0); + Assertions.assertEquals(0, allDisabledFeatures.size()); } @Test @@ -141,6 +141,6 @@ public void testGetHeatMapNotInDisabledFeaturesListWhenHeatMapFlagIsTrue() { List allDisabledFeatures = (List) disabledFeatures.getEntity(); Assertions.assertNotNull(allDisabledFeatures); - Assertions.assertTrue(allDisabledFeatures.size() == 0); + Assertions.assertEquals(0, allDisabledFeatures.size()); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java index 
35196a748b5..753804e5fab 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java @@ -43,6 +43,7 @@ import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; /** * This is a utility class for common code for test cases. @@ -185,8 +186,8 @@ public void testNSSummaryBasicInfoNoPath( NamespaceSummaryResponse invalidObj = (NamespaceSummaryResponse) invalidResponse.getEntity(); assertEquals(ResponseStatus.PATH_NOT_FOUND, invalidObj.getStatus()); - assertEquals(null, invalidObj.getCountStats()); - assertEquals(null, invalidObj.getObjectDBInfo()); + assertNull(invalidObj.getCountStats()); + assertNull(invalidObj.getObjectDBInfo()); } public void testNSSummaryBasicInfoKey( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java index 4e86b72709b..a46dbc0d052 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java @@ -78,7 +78,7 @@ public void testHealthyContainer() { assertFalse(status.isUnderReplicated()); assertEquals(0, status.replicaDelta()); assertFalse(status.isMissing()); - assertEquals(false, status.isMisReplicated()); + assertFalse(status.isMisReplicated()); assertEquals(0, status.misReplicatedDelta()); assertEquals(container, status.getContainer()); @@ -102,7 +102,7 @@ public void testHealthyContainerWithExtraUnhealthyReplica() { assertFalse(status.isUnderReplicated()); assertEquals(0, status.replicaDelta()); assertFalse(status.isMissing()); - assertEquals(false, status.isMisReplicated()); + 
assertFalse(status.isMisReplicated()); assertEquals(0, status.misReplicatedDelta()); } @@ -117,7 +117,7 @@ public void testMissingContainer() { assertFalse(status.isUnderReplicated()); assertEquals(3, status.replicaDelta()); assertTrue(status.isMissing()); - assertEquals(false, status.isMisReplicated()); + assertFalse(status.isMisReplicated()); assertEquals(0, status.misReplicatedDelta()); } @@ -133,7 +133,7 @@ public void testUnderReplicatedContainer() { assertFalse(status.isOverReplicated()); assertTrue(status.isUnderReplicated()); assertEquals(2, status.replicaDelta()); - assertEquals(false, status.isMisReplicated()); + assertFalse(status.isMisReplicated()); assertEquals(0, status.misReplicatedDelta()); } @@ -152,7 +152,7 @@ public void testOverReplicatedContainer() { assertFalse(status.isUnderReplicated()); assertTrue(status.isOverReplicated()); assertEquals(-1, status.replicaDelta()); - assertEquals(false, status.isMisReplicated()); + assertFalse(status.isMisReplicated()); assertEquals(0, status.misReplicatedDelta()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index cd9b3882f74..05d87e01143 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -176,8 +176,7 @@ public void testDeleteKeys() throws IOException, OS3Exception { MultiDeleteResponse response = bucketEndpoint.multiDelete("BucketName", "keyName", request); assertEquals(1, response.getErrors().size()); - assertTrue( - response.getErrors().get(0).getCode().equals("PermissionDenied")); + assertEquals("PermissionDenied", response.getErrors().get(0).getCode()); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java index 3e9a289c761..c9402a60ad8 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java @@ -21,6 +21,8 @@ import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test class to test RangeHeaderParserUtil. @@ -37,60 +39,60 @@ public void testRangeHeaderParser() { rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=0-8", 10); assertEquals(0, rangeHeader.getStartOffset()); assertEquals(8, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); + assertFalse(rangeHeader.isReadFull()); + assertFalse(rangeHeader.isInValidRange()); //range is with in file length, both start and end offset are same rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=0-0", 10); assertEquals(0, rangeHeader.getStartOffset()); assertEquals(0, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); + assertFalse(rangeHeader.isReadFull()); + assertFalse(rangeHeader.isInValidRange()); //range is not with in file length, both start and end offset are greater // than length rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=11-10", 10); - assertEquals(true, rangeHeader.isInValidRange()); + assertTrue(rangeHeader.isInValidRange()); // range is satisfying, one of the range is with in the length. 
So, read // full file rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=11-8", 10); assertEquals(0, rangeHeader.getStartOffset()); assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(true, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); + assertTrue(rangeHeader.isReadFull()); + assertFalse(rangeHeader.isInValidRange()); // bytes spec is wrong rangeHeader = RangeHeaderParserUtil.parseRangeHeader("mb=11-8", 10); assertEquals(0, rangeHeader.getStartOffset()); assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(true, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); + assertTrue(rangeHeader.isReadFull()); + assertFalse(rangeHeader.isInValidRange()); // range specified is invalid rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-11-8", 10); assertEquals(0, rangeHeader.getStartOffset()); assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(true, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); + assertTrue(rangeHeader.isReadFull()); + assertFalse(rangeHeader.isInValidRange()); //Last n bytes rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-6", 10); assertEquals(4, rangeHeader.getStartOffset()); assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); + assertFalse(rangeHeader.isReadFull()); + assertFalse(rangeHeader.isInValidRange()); rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-106", 10); assertEquals(0, rangeHeader.getStartOffset()); assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isInValidRange()); + assertFalse(rangeHeader.isInValidRange()); rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=3977248768" + "-4977248768", 4977248769L); assertEquals(3977248768L, rangeHeader.getStartOffset()); assertEquals(4977248768L, rangeHeader.getEndOffset()); - assertEquals(false, 
rangeHeader.isInValidRange()); + assertFalse(rangeHeader.isInValidRange()); }