From ffe71985e63bc50c243db72e0fa61fd443dfd638 Mon Sep 17 00:00:00 2001 From: Ivan Zlenko <241953+ivanzlenko@users.noreply.github.com> Date: Thu, 10 Oct 2024 22:13:06 +0500 Subject: [PATCH] HDDS-11544. Improve work with arrays (#7286) --- .../utils/CertificateSignRequest.java | 11 ++--- .../hadoop/ozone/HddsPolicyProvider.java | 4 +- .../server/ratis/ContainerStateMachine.java | 18 ++++---- .../ECContainerOperationClient.java | 12 ++---- .../ozone/erasurecode/CodecRegistry.java | 7 ++-- .../hdds/utils/DBCheckpointServlet.java | 14 +++---- .../balancer/ContainerBalancerTask.java | 2 +- .../SCMContainerPlacementRackAware.java | 3 +- .../hdds/scm/block/TestBlockManager.java | 42 +++++++++---------- .../hdds/scm/block/TestDeletedBlockLog.java | 2 +- ...TestIncrementalContainerReportHandler.java | 2 +- .../ozone/client/TestOzoneECClient.java | 4 +- .../commandhandler/TestBlockDeletion.java | 31 +++++++------- .../hadoop/ozone/om/KeyManagerImpl.java | 24 +++++------ .../fs/ozone/BasicOzoneClientAdapterImpl.java | 17 ++++---- .../BasicRootedOzoneClientAdapterImpl.java | 19 ++++----- 16 files changed, 94 insertions(+), 118 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java index 1f04e868a85..553b1dc812e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java @@ -27,13 +27,12 @@ import java.util.List; import java.util.Optional; +import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.commons.validator.routines.DomainValidator; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; - -import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.bouncycastle.asn1.ASN1EncodableVector; import org.bouncycastle.asn1.ASN1Object; @@ -390,7 +389,7 @@ private Optional getSubjectAltNameExtension() throws if (altNames != null) { return Optional.of(new Extension(Extension.subjectAlternativeName, false, new DEROctetString(new GeneralNames( - altNames.toArray(new GeneralName[altNames.size()]))))); + altNames.toArray(new GeneralName[0]))))); } return Optional.empty(); } @@ -414,12 +413,10 @@ private Extensions createExtensions() throws IOException { // Add subject alternate name extension Optional san = getSubjectAltNameExtension(); - if (san.isPresent()) { - extensions.add(san.get()); - } + san.ifPresent(extensions::add); return new Extensions( - extensions.toArray(new Extension[extensions.size()])); + extensions.toArray(new Extension[0])); } public CertificateSignRequest build() throws SCMSecurityException { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java index eeed4fab5f7..52217ce7f83 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -24,7 +24,7 @@ import 
org.apache.hadoop.security.authorize.Service; import org.apache.ratis.util.MemoizedSupplier; -import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -50,7 +50,7 @@ public static HddsPolicyProvider getInstance() { } private static final List DN_SERVICES = - Arrays.asList( + Collections.singletonList( new Service( OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, ReconfigureProtocol.class) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index be566f84fc9..e7eb0aa7817 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -41,8 +41,9 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; -import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -69,21 +70,18 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.LogEntryProto; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.proto.RaftProtos.RoleInfoProto; +import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; -import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.protocol.TermIndex; @@ -98,10 +96,10 @@ import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat; +import org.apache.ratis.util.JavaUtils; import org.apache.ratis.util.LifeCycle; import org.apache.ratis.util.TaskQueue; import org.apache.ratis.util.function.CheckedSupplier; -import org.apache.ratis.util.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -818,11 +816,9 @@ private ByteString readStateMachineData( */ @Override public CompletableFuture flush(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); return CompletableFuture.allOf( - 
futureList.toArray(new CompletableFuture[futureList.size()])); + writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) + .map(Map.Entry::getValue).toArray(CompletableFuture[]::new)); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index 487e6d37b28..95b7d06167f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -18,11 +18,13 @@ package org.apache.hadoop.ozone.container.ec.reconstruction; import com.google.common.collect.ImmutableList; +import jakarta.annotation.Nonnull; import org.apache.commons.collections.map.SingletonMap; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; @@ -34,8 +36,6 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +44,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; /** * This class wraps necessary container-level rpc calls @@ -93,14 +92,11 @@ public BlockData[] listBlock(long containerId, DatanodeDetails dn, try { return BlockData.getFromProtoBuf(i); } catch (IOException e) { - LOG.debug("Failed while converting to protobuf BlockData. Returning" - + " null for listBlock from DN: " + dn, - e); + LOG.debug("Failed while converting to protobuf BlockData. Returning null for listBlock from DN: {}", dn, e); // TODO: revisit here. 
return null; } - }).collect(Collectors.toList()) - .toArray(new BlockData[blockDataList.size()]); + }).toArray(BlockData[]::new); } finally { this.xceiverClientManager.releaseClient(xceiverClient, false); } diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java index 83650c132b0..2069a51be17 100644 --- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java +++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java @@ -19,9 +19,9 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeXORRawErasureCoderFactory; +import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,6 @@ import java.util.Map; import java.util.ServiceLoader; import java.util.Set; -import java.util.stream.Collectors; /** * This class registers all coder implementations. @@ -108,8 +107,8 @@ void updateCoders(Iterable coderFactories) { String codecName = entry.getKey(); List coders = entry.getValue(); coderNameMap.put(codecName, coders.stream(). - map(RawErasureCoderFactory::getCoderName). - collect(Collectors.toList()).toArray(new String[0])); + map(RawErasureCoderFactory::getCoderName) + .toArray(String[]::new)); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java index 2d718628e1e..cb1fdd3375a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java @@ -41,22 +41,20 @@ import org.apache.commons.fileupload.servlet.ServletFileUpload; import org.apache.commons.fileupload.util.Streams; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; - -import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST; import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; -import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Provides the current checkpoint Snapshot of the OM/SCM DB. (tar) */ @@ -287,7 +285,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) { LOG.warn("Exception occured during form data parsing {}", e.getMessage()); } - return sstParam.size() == 0 ? null : sstParam.toArray(new String[0]); + return sstParam.isEmpty() ? 
null : sstParam.toArray(new String[0]); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 19a2f3c2e62..c3b76dc4497 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -691,7 +691,7 @@ private void checkIterationMoveResults() { moveSelectionToFutureMap.values(); if (!futures.isEmpty()) { CompletableFuture allFuturesResult = CompletableFuture.allOf( - futures.toArray(new CompletableFuture[futures.size()])); + futures.toArray(new CompletableFuture[0])); try { allFuturesResult.get(config.getMoveTimeout().toMillis(), TimeUnit.MILLISECONDS); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 7fec06e7e06..1c2b5a3be39 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -346,8 +346,7 @@ protected List chooseDatanodesInternalLegacy( return chooseNodes(null, chosenNodes, mutableFavoredNodes, mutableUsedNodes, favorIndex, nodesRequired, mapSizeRequired); } else { - List mutableExcludedNodes = new ArrayList<>(); - mutableExcludedNodes.addAll(excludedNodes); + List mutableExcludedNodes = new ArrayList<>(excludedNodes); // choose node to meet replication requirement // case 1: one excluded node, choose one on the same rack as the excluded // node, choose others on different racks. 
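The recurring change across the hunks above swaps presized toArray(new T[collection.size()]) calls for the zero-length-array idiom and collapses collect(Collectors.toList()).toArray(...) chains into a single Stream.toArray(T[]::new). A minimal, self-contained sketch of the before/after shapes follows; the class and variable names are illustrative only, not taken from the patch:

    import java.util.Arrays;
    import java.util.List;

    public final class ToArrayIdioms {
      public static void main(String[] args) {
        List<String> names = Arrays.asList("dn1", "dn2", "dn3");

        // Before: presized array. The JVM zeroes the array up front and the
        // collection then overwrites every slot, so the sizing buys nothing;
        // for concurrent collections, size() can also be stale by copy time.
        String[] presized = names.toArray(new String[names.size()]);

        // After: zero-length array. toArray allocates the right size itself,
        // and on HotSpot this is typically as fast as or faster than presizing.
        String[] zeroSized = names.toArray(new String[0]);

        // After, for streams: skip the intermediate List entirely and let the
        // stream allocate a correctly sized array via a constructor reference.
        String[] upper = names.stream()
            .map(String::toUpperCase)
            .toArray(String[]::new);

        System.out.println(Arrays.toString(presized));
        System.out.println(Arrays.toString(zeroSized));
        System.out.println(Arrays.toString(upper));
      }
    }

The same reasoning motivates the TestDeletedBlockLog hunk further down, which replaces a hard-coded new DatanodeDetails[3]: a magic length that only happens to match the list couples the call site to the list's size and silently null-pads or gets discarded the moment that size changes.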
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 6438b6f8d49..621c9297e7e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -22,15 +22,15 @@ import java.time.Clock; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.ArrayList; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -39,30 +39,30 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; -import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; -import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; -import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; +import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl; +import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import 
org.apache.hadoop.hdds.scm.server.SCMConfigurator; @@ -76,21 +76,19 @@ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; import org.apache.ozone.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MB; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests for SCM Block Manager. @@ -273,7 +271,7 @@ void testAllocateBlockInParallel() throws Exception { } CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .allOf(futureList.toArray(new CompletableFuture[0])) .get(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 03500529ff9..c8e2f267aff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -265,7 +265,7 @@ private void commitTransactions( List transactionResults) throws IOException { commitTransactions(transactionResults, - dnList.toArray(new DatanodeDetails[3])); + dnList.toArray(new DatanodeDetails[0])); } private void commitTransactions( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index dbcccce598c..4cf9eccc4fa 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -644,7 +644,7 @@ public void testECReplicaIndexValidation() throws NodeNotFoundException, IOException, TimeoutException { List dns = IntStream.range(0, 5) .mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList()); - dns.stream().forEach(dn -> nodeManager.register(dn, null, null)); + dns.forEach(dn -> nodeManager.register(dn, null, null)); ECReplicationConfig replicationConfig = new ECReplicationConfig(3, 2); final ContainerInfo container = getECContainer(LifeCycleState.CLOSED, PipelineID.randomId(), replicationConfig); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java index 25a3ad2d9c8..1b67f024bbe 100644 --- 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java @@ -153,7 +153,7 @@ public void testPutECKeyAndCheckDNStoredData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = 0; i < inputChunks.length; i++) { MockDatanodeStorage datanodeStorage = storages.get(dnDetails[i]); @@ -182,7 +182,7 @@ public void testPutECKeyAndCheckParityData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = dataBlocks; i < parityBlocks + dataBlocks; i++) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5ff8d713649..719715ac8b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -17,11 +17,18 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -31,20 +38,23 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import 
org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -80,17 +90,6 @@ import org.slf4j.LoggerFactory; import org.slf4j.event.Level; -import java.io.IOException; -import java.time.Duration; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.List; -import java.util.HashSet; -import java.util.ArrayList; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - import static java.lang.Math.max; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -610,7 +609,7 @@ public void testContainerDeleteWithInvalidKeyCount() final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; List containerIdList = new ArrayList<>(); - containerInfos.stream().forEach(container -> { + containerInfos.forEach(container -> { assertEquals(valueSize, container.getUsedBytes()); assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9bdbc70fb99..e99bdea85ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -41,6 +41,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import jakarta.annotation.Nonnull; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -57,6 +62,7 @@ import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.Table; @@ -71,6 +77,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -86,7 +93,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; @@ -99,18 +105,14 @@ import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static java.lang.String.format; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; @@ -159,15 +161,11 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; +import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; -import jakarta.annotation.Nonnull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Implementation of keyManager. */ @@ -1721,7 +1719,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, cacheKeyMap.clear(); List keyInfoList = new ArrayList<>(fileStatusList.size()); - fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add); + fileStatusList.stream().map(OzoneFileStatus::getKeyInfo).forEach(keyInfoList::add); if (args.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 618a837b168..68a8ee7fc4b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -29,6 +29,8 @@ import java.util.List; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -68,21 +70,21 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import 
org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.token.Token; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -90,9 +92,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Basic Implementation of the OzoneFileSystem calls. *

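Beyond the toArray changes, the patch folds several adjacent single-call cleanups into the same pass: Optional.isPresent()/get() pairs become ifPresent (CertificateSignRequest), a one-element Arrays.asList becomes Collections.singletonList (HddsPolicyProvider), an addAll after construction becomes the copy constructor (SCMContainerPlacementRackAware), and stream().forEach drops the redundant stream (the test hunks). A compact sketch of those four shapes, with hypothetical names standing in for the patch's fields:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Optional;

    public final class CollectionCleanups {
      public static void main(String[] args) {
        // Arrays.asList(x) for one element -> Collections.singletonList(x):
        // an immutable single-element list with a smaller footprint.
        List<String> services = Collections.singletonList("reconfigure-protocol");

        // isPresent()/get() -> ifPresent(consumer): no explicit branch needed.
        List<String> extensions = new ArrayList<>();
        Optional<String> san = Optional.of("subjectAltName");
        san.ifPresent(extensions::add);

        // new ArrayList<>() followed by addAll(src) -> the copy constructor,
        // which allocates the backing array at the right size in one step.
        List<String> excluded = new ArrayList<>(Arrays.asList("dn1", "dn2"));

        // list.stream().forEach(...) -> list.forEach(...): Iterable already
        // provides forEach, so the stream wrapper is pure overhead.
        excluded.forEach(System.out::println);

        System.out.println(services + " " + extensions);
      }
    }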
@@ -595,8 +594,8 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index fefc87184ff..41e47d91aa9 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -32,6 +32,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -65,17 +66,17 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneFsServerDefaults; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.OzoneSnapshot; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -85,30 +86,26 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; - -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .VOLUME_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; @@ -1087,8 +1084,8 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength());
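The ContainerStateMachine.flush and ContainerBalancerTask.checkIterationMoveResults hunks apply the same idea to futures: filter the pending map and hand the stream's array straight to CompletableFuture.allOf, with no intermediate List. A runnable sketch of that pattern under assumed names (FlushUpTo and its pending map are illustrative stand-ins for the patch's writeChunkFutureMap):

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentSkipListMap;

    public final class FlushUpTo {
      // Illustrative stand-in for the patch's writeChunkFutureMap:
      // log index -> pending write-chunk future.
      private final Map<Long, CompletableFuture<Void>> pending =
          new ConcurrentSkipListMap<>();

      // Completes when every write at or below the given index has completed,
      // without materializing the intermediate List the old code built.
      public CompletableFuture<Void> flush(long index) {
        return CompletableFuture.allOf(
            pending.entrySet().stream()
                .filter(e -> e.getKey() <= index)
                .map(Map.Entry::getValue)
                .toArray(CompletableFuture[]::new));
      }

      public static void main(String[] args) {
        FlushUpTo f = new FlushUpTo();
        CompletableFuture<Void> w1 = new CompletableFuture<>();
        CompletableFuture<Void> w2 = new CompletableFuture<>();
        f.pending.put(1L, w1);
        f.pending.put(2L, w2);

        CompletableFuture<Void> flushed = f.flush(1L);
        w1.complete(null);                    // only index 1 must finish
        System.out.println(flushed.isDone()); // true; w2 is beyond the index
      }
    }

One caveat: allOf snapshots the filtered futures at call time, so writes arriving after flush() is invoked do not extend the returned future — which appears to be the intent here, namely "everything at or below this log index".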