HDDS-11544. Improve work with arrays (apache#7286)
ivanzlenko authored Oct 10, 2024
1 parent 5657604 commit ffe7198
Showing 16 changed files with 94 additions and 118 deletions.
@@ -27,13 +27,12 @@
import java.util.List;
import java.util.Optional;

import com.google.common.base.Preconditions;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.validator.routines.DomainValidator;
import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
import org.apache.hadoop.hdds.security.x509.exception.CertificateException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.OzoneSecurityUtil;
import org.bouncycastle.asn1.ASN1EncodableVector;
import org.bouncycastle.asn1.ASN1Object;
@@ -390,7 +389,7 @@ private Optional<Extension> getSubjectAltNameExtension() throws
if (altNames != null) {
return Optional.of(new Extension(Extension.subjectAlternativeName,
false, new DEROctetString(new GeneralNames(
altNames.toArray(new GeneralName[altNames.size()])))));
altNames.toArray(new GeneralName[0])))));
}
return Optional.empty();
}
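
The hunk above swaps a presized array for a zero-length one in Collection.toArray. On current JVMs the zero-length form is at least as fast, and it avoids sizing races on concurrent collections. A minimal, self-contained sketch of the idiom (the list contents are illustrative, not taken from the patch):

import java.util.ArrayList;
import java.util.List;

public class ToArrayExample {
    public static void main(String[] args) {
        List<String> altNames = new ArrayList<>();
        altNames.add("scm.example.com");
        altNames.add("10.0.0.1");

        // Older style: presize the array with the collection's current size.
        String[] presized = altNames.toArray(new String[altNames.size()]);

        // Preferred style: pass a zero-length array; toArray allocates the
        // correctly sized result itself, and the JIT optimizes this form well.
        String[] zeroSized = altNames.toArray(new String[0]);

        System.out.println(presized.length + " " + zeroSized.length); // 2 2
    }
}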
@@ -414,12 +413,10 @@ private Extensions createExtensions() throws IOException {

// Add subject alternate name extension
Optional<Extension> san = getSubjectAltNameExtension();
if (san.isPresent()) {
extensions.add(san.get());
}
san.ifPresent(extensions::add);

return new Extensions(
extensions.toArray(new Extension[extensions.size()]));
extensions.toArray(new Extension[0]));
}

public CertificateSignRequest build() throws SCMSecurityException {
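The createExtensions() hunk above also collapses an isPresent()/get() pair into Optional.ifPresent, which invokes the consumer only when a value exists. A small sketch of the same refactoring, with plain strings standing in for X.509 extensions:

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

public class IfPresentExample {
    public static void main(String[] args) {
        List<String> extensions = new ArrayList<>();
        Optional<String> san = Optional.of("subjectAltName");

        // Before: explicit presence check followed by get().
        if (san.isPresent()) {
            extensions.add(san.get());
        }

        // After: let Optional call the consumer when a value is present.
        san.ifPresent(extensions::add);

        System.out.println(extensions); // [subjectAltName, subjectAltName]
    }
}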
@@ -24,7 +24,7 @@
import org.apache.hadoop.security.authorize.Service;
import org.apache.ratis.util.MemoizedSupplier;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Supplier;

@@ -50,7 +50,7 @@ public static HddsPolicyProvider getInstance() {
}

private static final List<Service> DN_SERVICES =
Arrays.asList(
Collections.singletonList(
new Service(
OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL,
ReconfigureProtocol.class)
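With only one Service in the list, Arrays.asList is replaced by Collections.singletonList, an immutable single-element list that does not allocate a varargs array. A brief sketch (the string stands in for the ACL constant):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SingletonListExample {
    public static void main(String[] args) {
        // Arrays.asList wraps a freshly allocated varargs array.
        List<String> viaArrays = Arrays.asList("reconfigure-protocol-acl");

        // Collections.singletonList stores the single element directly and
        // is immutable, which documents the intent of a one-entry constant.
        List<String> viaSingleton = Collections.singletonList("reconfigure-protocol-acl");

        System.out.println(viaArrays.equals(viaSingleton)); // true
    }
}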
@@ -41,8 +41,9 @@
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.stream.Collectors;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -69,21 +70,18 @@
import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.ratis.proto.RaftProtos;
import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto;
import org.apache.ratis.proto.RaftProtos.LogEntryProto;
import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto;
import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
import org.apache.ratis.protocol.Message;
import org.apache.ratis.protocol.RaftClientRequest;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftGroupMemberId;
import org.apache.ratis.protocol.RaftPeerId;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.RaftPeerId;
import org.apache.ratis.protocol.exceptions.StateMachineException;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.server.protocol.TermIndex;
@@ -98,10 +96,10 @@
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat;
import org.apache.ratis.util.JavaUtils;
import org.apache.ratis.util.LifeCycle;
import org.apache.ratis.util.TaskQueue;
import org.apache.ratis.util.function.CheckedSupplier;
import org.apache.ratis.util.JavaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -818,11 +816,9 @@ private ByteString readStateMachineData(
*/
@Override
public CompletableFuture<Void> flush(long index) {
List<CompletableFuture<ContainerCommandResponseProto>> futureList =
writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index)
.map(Map.Entry::getValue).collect(Collectors.toList());
return CompletableFuture.allOf(
futureList.toArray(new CompletableFuture[futureList.size()]));
writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index)
.map(Map.Entry::getValue).toArray(CompletableFuture[]::new));
}

/**
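The flush(long) hunk above drops the intermediate List: the stream of pending write-chunk futures is terminated directly with toArray(CompletableFuture[]::new) and handed to CompletableFuture.allOf. A simplified, self-contained sketch of that shape, with map keys standing in for log indexes:

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentSkipListMap;

public class FlushExample {
    public static void main(String[] args) {
        Map<Long, CompletableFuture<String>> writeChunkFutureMap = new ConcurrentSkipListMap<>();
        writeChunkFutureMap.put(1L, CompletableFuture.completedFuture("chunk-1"));
        writeChunkFutureMap.put(2L, CompletableFuture.completedFuture("chunk-2"));
        writeChunkFutureMap.put(9L, new CompletableFuture<>()); // not yet flushed

        long index = 5L;
        // Filter futures up to the requested index and hand them straight to
        // allOf as an array; no intermediate List is materialized.
        CompletableFuture<Void> flushed = CompletableFuture.allOf(
            writeChunkFutureMap.entrySet().stream()
                .filter(e -> e.getKey() <= index)
                .map(Map.Entry::getValue)
                .toArray(CompletableFuture[]::new));

        System.out.println(flushed.isDone()); // true: entries 1 and 2 are complete
    }
}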
@@ -18,11 +18,13 @@
package org.apache.hadoop.ozone.container.ec.reconstruction;

import com.google.common.collect.ImmutableList;
import jakarta.annotation.Nonnull;
import org.apache.commons.collections.map.SingletonMap;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
@@ -34,8 +36,6 @@
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
import jakarta.annotation.Nonnull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -44,7 +44,6 @@
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

/**
* This class wraps necessary container-level rpc calls
@@ -93,14 +92,11 @@ public BlockData[] listBlock(long containerId, DatanodeDetails dn,
try {
return BlockData.getFromProtoBuf(i);
} catch (IOException e) {
LOG.debug("Failed while converting to protobuf BlockData. Returning"
+ " null for listBlock from DN: " + dn,
e);
LOG.debug("Failed while converting to protobuf BlockData. Returning null for listBlock from DN: {}", dn, e);
// TODO: revisit here.
return null;
}
}).collect(Collectors.toList())
.toArray(new BlockData[blockDataList.size()]);
}).toArray(BlockData[]::new);
} finally {
this.xceiverClientManager.releaseClient(xceiverClient, false);
}
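Besides terminating the stream with a typed toArray, the hunk above rewrites the debug statement to use an SLF4J placeholder instead of string concatenation, keeping the exception as the trailing argument so its stack trace is still logged. A sketch of the logging idiom (logger name and message text are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingExample {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingExample.class);

    public static void main(String[] args) {
        String dn = "datanode-1";
        Exception e = new IllegalStateException("decode failure");

        // Concatenation builds the message string even when DEBUG is disabled.
        LOG.debug("Failed while converting BlockData. Returning null for listBlock from DN: " + dn, e);

        // Placeholder form defers formatting; the trailing Throwable argument
        // is logged with its stack trace.
        LOG.debug("Failed while converting BlockData. Returning null for listBlock from DN: {}", dn, e);
    }
}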
@@ -19,9 +19,9 @@

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;
import org.apache.ozone.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
import org.apache.ozone.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -31,7 +31,6 @@
import java.util.Map;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.stream.Collectors;

/**
* This class registers all coder implementations.
@@ -108,8 +107,8 @@ void updateCoders(Iterable<RawErasureCoderFactory> coderFactories) {
String codecName = entry.getKey();
List<RawErasureCoderFactory> coders = entry.getValue();
coderNameMap.put(codecName, coders.stream().
map(RawErasureCoderFactory::getCoderName).
collect(Collectors.toList()).toArray(new String[0]));
map(RawErasureCoderFactory::getCoderName)
.toArray(String[]::new));
}
}

@@ -41,22 +41,20 @@
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.commons.fileupload.util.Streams;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.server.OzoneAdmins;
import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.DBStore;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX;

import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Provides the current checkpoint Snapshot of the OM/SCM DB. (tar)
*/
@@ -287,7 +285,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) {
LOG.warn("Exception occured during form data parsing {}", e.getMessage());
}

return sstParam.size() == 0 ? null : sstParam.toArray(new String[0]);
return sstParam.isEmpty() ? null : sstParam.toArray(new String[0]);
}

/**
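The servlet hunk above also prefers isEmpty() over size() == 0, which states the intent directly and can be cheaper on collections whose size is not O(1). A tiny sketch:

import java.util.ArrayList;
import java.util.List;

public class IsEmptyExample {
    public static void main(String[] args) {
        List<String> sstParam = new ArrayList<>();

        // Equivalent checks; isEmpty() reads as intent rather than arithmetic.
        System.out.println(sstParam.size() == 0); // true
        System.out.println(sstParam.isEmpty());   // true

        String[] result = sstParam.isEmpty() ? null : sstParam.toArray(new String[0]);
        System.out.println(result); // null
    }
}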
@@ -691,7 +691,7 @@ private void checkIterationMoveResults() {
moveSelectionToFutureMap.values();
if (!futures.isEmpty()) {
CompletableFuture<Void> allFuturesResult = CompletableFuture.allOf(
futures.toArray(new CompletableFuture[futures.size()]));
futures.toArray(new CompletableFuture[0]));
try {
allFuturesResult.get(config.getMoveTimeout().toMillis(),
TimeUnit.MILLISECONDS);
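checkIterationMoveResults() applies the same zero-length-array idiom before blocking on the combined future with the configured move timeout. A self-contained sketch of a timeout-bounded wait on a batch of futures; the 100 ms bound is an arbitrary illustration, not a value from the patch:

import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class MoveResultsExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Collection<CompletableFuture<Boolean>> futures = Arrays.asList(
            CompletableFuture.completedFuture(true),
            CompletableFuture.completedFuture(false));

        CompletableFuture<Void> allFuturesResult =
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        try {
            // Bound the wait so a stuck container move cannot stall the iteration.
            allFuturesResult.get(100, TimeUnit.MILLISECONDS);
            System.out.println("all moves finished");
        } catch (TimeoutException e) {
            System.out.println("timed out waiting for moves");
        }
    }
}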
@@ -346,8 +346,7 @@ protected List<DatanodeDetails> chooseDatanodesInternalLegacy(
return chooseNodes(null, chosenNodes, mutableFavoredNodes,
mutableUsedNodes, favorIndex, nodesRequired, mapSizeRequired);
} else {
List<DatanodeDetails> mutableExcludedNodes = new ArrayList<>();
mutableExcludedNodes.addAll(excludedNodes);
List<DatanodeDetails> mutableExcludedNodes = new ArrayList<>(excludedNodes);
// choose node to meet replication requirement
// case 1: one excluded node, choose one on the same rack as the excluded
// node, choose others on different racks.
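Here an empty ArrayList followed by addAll is folded into the ArrayList copy constructor, which sizes the backing array once from the source collection. Sketch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CopyConstructorExample {
    public static void main(String[] args) {
        List<String> excludedNodes = Arrays.asList("dn-1", "dn-2");

        // Before: allocate a default-capacity list, then grow it with addAll.
        List<String> mutableOld = new ArrayList<>();
        mutableOld.addAll(excludedNodes);

        // After: the copy constructor allocates exactly the needed capacity
        // and copies in one step.
        List<String> mutableNew = new ArrayList<>(excludedNodes);

        System.out.println(mutableOld.equals(mutableNew)); // true
    }
}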
@@ -22,15 +22,15 @@
import java.time.Clock;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -39,30 +39,30 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl;
import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
@@ -76,21 +76,19 @@
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
import org.apache.ozone.test.GenericTestUtils;

import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.apache.hadoop.ozone.OzoneConsts.MB;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.io.TempDir;

import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.apache.hadoop.ozone.OzoneConsts.MB;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;

/**
* Tests for SCM Block Manager.
@@ -273,7 +271,7 @@ void testAllocateBlockInParallel() throws Exception {
}

CompletableFuture
.allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
.allOf(futureList.toArray(new CompletableFuture[0]))
.get();
}

@@ -265,7 +265,7 @@ private void commitTransactions(
List<DeleteBlockTransactionResult> transactionResults)
throws IOException {
commitTransactions(transactionResults,
dnList.toArray(new DatanodeDetails[3]));
dnList.toArray(new DatanodeDetails[0]));
}

private void commitTransactions(
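The test had hard-coded new DatanodeDetails[3] as the toArray argument; a fixed length pads the result with null when the list is shorter and forces a second allocation when it is longer, so a zero-length array is safer. Sketch with plain strings:

import java.util.Arrays;
import java.util.List;

public class FixedSizeToArrayExample {
    public static void main(String[] args) {
        List<String> dnList = Arrays.asList("dn-1", "dn-2");

        // A hard-coded length pads the tail with null when the list is smaller...
        String[] padded = dnList.toArray(new String[3]);
        System.out.println(Arrays.toString(padded)); // [dn-1, dn-2, null]

        // ...whereas a zero-length array always yields exactly list.size() elements.
        String[] exact = dnList.toArray(new String[0]);
        System.out.println(Arrays.toString(exact));  // [dn-1, dn-2]
    }
}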
@@ -644,7 +644,7 @@ public void testECReplicaIndexValidation() throws NodeNotFoundException,
IOException, TimeoutException {
List<DatanodeDetails> dns = IntStream.range(0, 5)
.mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList());
dns.stream().forEach(dn -> nodeManager.register(dn, null, null));
dns.forEach(dn -> nodeManager.register(dn, null, null));
ECReplicationConfig replicationConfig = new ECReplicationConfig(3, 2);
final ContainerInfo container = getECContainer(LifeCycleState.CLOSED,
PipelineID.randomId(), replicationConfig);
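dns.stream().forEach(...) becomes dns.forEach(...), calling Iterable.forEach directly instead of building a throwaway stream. Sketch:

import java.util.Arrays;
import java.util.List;

public class ForEachExample {
    public static void main(String[] args) {
        List<String> dns = Arrays.asList("dn-1", "dn-2", "dn-3");

        // Stream detour: builds a Stream object just to iterate it.
        dns.stream().forEach(dn -> System.out.println("register " + dn));

        // Direct Iterable.forEach: same effect, no intermediate stream.
        dns.forEach(dn -> System.out.println("register " + dn));
    }
}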
@@ -153,7 +153,7 @@ public void testPutECKeyAndCheckDNStoredData() throws IOException {
Map<DatanodeDetails, MockDatanodeStorage> storages =
factoryStub.getStorages();
DatanodeDetails[] dnDetails =
storages.keySet().toArray(new DatanodeDetails[storages.size()]);
storages.keySet().toArray(new DatanodeDetails[0]);
Arrays.sort(dnDetails);
for (int i = 0; i < inputChunks.length; i++) {
MockDatanodeStorage datanodeStorage = storages.get(dnDetails[i]);
@@ -182,7 +182,7 @@ public void testPutECKeyAndCheckParityData() throws IOException {
Map<DatanodeDetails, MockDatanodeStorage> storages =
factoryStub.getStorages();
DatanodeDetails[] dnDetails =
storages.keySet().toArray(new DatanodeDetails[storages.size()]);
storages.keySet().toArray(new DatanodeDetails[0]);
Arrays.sort(dnDetails);

for (int i = dataBlocks; i < parityBlocks + dataBlocks; i++) {
Expand Down
Loading

0 comments on commit ffe7198

Please sign in to comment.