Commit 05b4c2b

Merge branch 'master' of github.com:ceekay47/ozone into HDDS-10206

ceekay committed Feb 28, 2024
2 parents 140ec36 + 8c4ab8e
Showing 276 changed files with 7,239 additions and 4,392 deletions.
9 changes: 6 additions & 3 deletions SECURITY.md
@@ -5,13 +5,16 @@
 The first stable release of Apache Ozone is 1.0; the previous alpha and beta releases are not supported by the community.
 
 | Version       | Supported          |
-| ------------- | ------------------ |
+|---------------| ------------------ |
 | 0.3.0 (alpha) | :x:                |
 | 0.4.0 (alpha) | :x:                |
 | 0.4.1 (alpha) | :x:                |
 | 0.5.0 (beta)  | :x:                |
-| 1.0           | :white_check_mark: |
-| 1.1           | :white_check_mark: |
+| 1.0.0         | :x:                |
+| 1.1.0         | :x:                |
+| 1.2.1         | :x:                |
+| 1.3.0         | :x:                |
+| 1.4.0         | :white_check_mark: |
 
 ## Reporting a Vulnerability
OzoneClientConfig.java

@@ -201,6 +201,13 @@ public enum ChecksumCombineMode {
   // 3 concurrent stripe read should be enough.
   private int ecReconstructStripeReadPoolLimit = 10 * 3;
 
+  @Config(key = "ec.reconstruct.stripe.write.pool.limit",
+      defaultValue = "30",
+      description = "Thread pool max size for writing available EC chunks" +
+          " in parallel to reconstruct the whole stripe.",
+      tags = ConfigTag.CLIENT)
+  private int ecReconstructStripeWritePoolLimit = 10 * 3;
+
   @Config(key = "checksum.combine.mode",
       defaultValue = "COMPOSITE_CRC",
       description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] "

@@ -224,7 +231,7 @@ public enum ChecksumCombineMode {
   private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED";
 
   @PostConstruct
-  private void validate() {
+  public void validate() {
     Preconditions.checkState(streamBufferSize > 0);
     Preconditions.checkState(streamBufferFlushSize > 0);
     Preconditions.checkState(streamBufferMaxSize > 0);

@@ -387,6 +394,14 @@ public int getEcReconstructStripeReadPoolLimit() {
     return ecReconstructStripeReadPoolLimit;
   }
 
+  public void setEcReconstructStripeWritePoolLimit(int poolLimit) {
+    this.ecReconstructStripeWritePoolLimit = poolLimit;
+  }
+
+  public int getEcReconstructStripeWritePoolLimit() {
+    return ecReconstructStripeWritePoolLimit;
+  }
+
   public void setFsDefaultBucketLayout(String bucketLayout) {
     if (!bucketLayout.isEmpty()) {
       this.fsDefaultBucketLayout = bucketLayout;
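Taken together, the new setter and the now-public validate() let a caller tune the write pool programmatically and then re-check the config's invariants. A minimal sketch, using only members visible in this diff:

    OzoneConfiguration conf = new OzoneConfiguration();
    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    clientConfig.setEcReconstructStripeWritePoolLimit(16);
    clientConfig.validate();  // callable here only because validate() is now public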
BlockOutputStream.java

@@ -28,6 +28,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
 
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;

@@ -145,7 +146,8 @@ public BlockOutputStream(
       BufferPool bufferPool,
       OzoneClientConfig config,
       Token<? extends TokenIdentifier> token,
-      ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs
+      ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs,
+      Supplier<ExecutorService> blockOutputStreamResourceProvider
   ) throws IOException {
     this.xceiverClientFactory = xceiverClientManager;
     this.config = config;
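Threading a Supplier<ExecutorService> through the constructor instead of a ready-made executor keeps stream construction cheap: no pool is created until a stream actually asks for one. A hedged sketch of the wiring (only the Supplier parameter and the config getter come from this diff):

    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    Supplier<ExecutorService> blockOutputStreamResourceProvider =
        () -> Executors.newFixedThreadPool(
            clientConfig.getEcReconstructStripeWritePoolLimit());

Note that a bare lambda like this builds a fresh pool on every get(); a memoized variant is sketched after the ECBlockOutputStream hunk below.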
ECBlockOutputStream.java

@@ -44,6 +44,8 @@
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync;

@@ -75,10 +77,11 @@ public ECBlockOutputStream(
       BufferPool bufferPool,
       OzoneClientConfig config,
       Token<? extends TokenIdentifier> token,
-      ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs
+      ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs,
+      Supplier<ExecutorService> executorServiceSupplier
   ) throws IOException {
     super(blockID, xceiverClientManager,
-        pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs);
+        pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, executorServiceSupplier);
     // In EC stream, there will be only one node in pipeline.
     this.datanodeDetails = pipeline.getClosestNode();
   }
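EC writes one stripe through several per-node streams, so handing every ECBlockOutputStream the same memoized supplier lets them share a single bounded pool. A hand-rolled sketch assuming nothing beyond the JDK (Ratis's MemoizedSupplier.valueOf would serve the same purpose):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Supplier;

    final class SharedExecutorSupplier implements Supplier<ExecutorService> {
      private ExecutorService pool;

      @Override
      public synchronized ExecutorService get() {
        if (pool == null) {
          // A real caller would size this from the config; 30 mirrors the
          // default of ec.reconstruct.stripe.write.pool.limit.
          pool = Executors.newFixedThreadPool(30);
        }
        return pool;
      }
    }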
RatisBlockOutputStream.java

@@ -37,6 +37,8 @@
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Supplier;
 
 /**
  * An {@link OutputStream} used by the REST service in combination with the

@@ -65,8 +67,8 @@ public class RatisBlockOutputStream extends BlockOutputStream
   /**
    * Creates a new BlockOutputStream.
    *
-   * @param blockID block ID
-   * @param bufferPool pool of buffers
+   * @param blockID     block ID
+   * @param bufferPool  pool of buffers
    */
   @SuppressWarnings("checkstyle:ParameterNumber")
   public RatisBlockOutputStream(

@@ -76,10 +78,11 @@ public RatisBlockOutputStream(
       BufferPool bufferPool,
       OzoneClientConfig config,
       Token<? extends TokenIdentifier> token,
-      ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs
+      ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs,
+      Supplier<ExecutorService> blockOutputStreamResourceProvider
   ) throws IOException {
     super(blockID, xceiverClientManager, pipeline,
-        bufferPool, config, token, clientMetrics, streamBufferArgs);
+        bufferPool, config, token, clientMetrics, streamBufferArgs, blockOutputStreamResourceProvider);
     this.commitWatcher = new CommitWatcher(bufferPool, getXceiverClient());
   }
@@ -39,6 +39,11 @@ public void write(@Nonnull byte[] byteArray) throws IOException {
     write(ByteBuffer.wrap(byteArray));
   }
 
+  @Override
+  public void write(@Nonnull byte[] byteArray, int off, int len) throws IOException {
+    write(ByteBuffer.wrap(byteArray), off, len);
+  }
+
   @Override
   public void write(int b) throws IOException {
     write(new byte[]{(byte) b});
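The new three-argument override delegates to a write(ByteBuffer, int, int) that this class evidently already declares. Had only write(ByteBuffer) been available, an equivalent hedged formulation folds the bounds into the wrap call, since ByteBuffer.wrap(array, off, len) sets position and limit and performs the range checks itself:

    @Override
    public void write(byte[] byteArray, int off, int len) throws IOException {
      // wrap(array, off, len) yields position = off, limit = off + len and
      // throws IndexOutOfBoundsException for an invalid range.
      write(ByteBuffer.wrap(byteArray, off, len));
    }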
TestOzoneClientConfig.java (new file)

@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.junit.jupiter.api.Test;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class TestOzoneClientConfig {
+
+  @Test
+  void missingSizeSuffix() {
+    final int bytes = 1024;
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt("ozone.client.bytes.per.checksum", bytes);
+
+    OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class);
+
+    assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum());
+  }
+}
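The test pins down the fallback path: setInt stores a bare number, the missing unit suffix makes it parse as plain bytes, and 1024 B sits below the documented minimum, so the value is clamped up to OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE. For contrast, a hedged companion sketch with an explicit suffix (the 1 MB expectation assumes that value lies within any permitted bounds):

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("ozone.client.bytes.per.checksum", "1MB");
    OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class);
    assertEquals(1024 * 1024, subject.getBytesPerChecksum());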
@@ -47,6 +47,7 @@
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 
+import static java.util.concurrent.Executors.newFixedThreadPool;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.mock;

@@ -108,7 +109,9 @@ private BlockOutputStream createBlockOutputStream(BufferPool bufferPool)
         bufferPool,
         config,
         null,
-        ContainerClientMetrics.acquire(), streamBufferArgs);
+        ContainerClientMetrics.acquire(),
+        streamBufferArgs,
+        () -> newFixedThreadPool(10));
   }
 
   /**
HddsConfigKeys.java

@@ -338,6 +338,9 @@ private HddsConfigKeys() {
       HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL =
       "hdds.security.client.scm.secretkey.datanode.protocol.acl";
 
+  public static final String OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL =
+      "ozone.security.reconfigure.protocol.acl";
+
   // Determines if the Container Chunk Manager will write user data to disk
   // Set to false only for specific performance tests
   public static final String HDDS_CONTAINER_PERSISTDATA =
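Like the neighboring *.protocol.acl keys, the new constant names a service-level authorization ACL, presumably guarding the reconfigure protocol. A hedged sketch of how an admin might scope it, using Hadoop's standard ACL syntax of comma-separated users, a space, then comma-separated groups (names illustrative):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Allow user "ozoneadmin" plus members of group "hdds-admins".
    conf.set(HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL,
        "ozoneadmin hdds-admins");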
OzoneConfiguration.java

@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.ratis.server.RaftServerConfigKeys;
 
 import static java.util.Collections.unmodifiableSortedSet;

@@ -323,7 +324,67 @@ private static void addDeprecatedKeys() {
         new DeprecationDelta("ozone.scm.chunk.layout",
             ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY),
         new DeprecationDelta("hdds.datanode.replication.work.dir",
-            OZONE_CONTAINER_COPY_WORKDIR)
+            OZONE_CONTAINER_COPY_WORKDIR),
+        new DeprecationDelta("dfs.container.chunk.write.sync",
+            OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY),
+        new DeprecationDelta("dfs.container.ipc",
+            OzoneConfigKeys.DFS_CONTAINER_IPC_PORT),
+        new DeprecationDelta("dfs.container.ipc.random.port",
+            OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.admin.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT),
+        new DeprecationDelta("dfs.container.ratis.datanode.storage.dir",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR),
+        new DeprecationDelta("dfs.container.ratis.datastream.enabled",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED),
+        new DeprecationDelta("dfs.container.ratis.datastream.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT),
+        new DeprecationDelta("dfs.container.ratis.datastream.random.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.enabled",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY),
+        new DeprecationDelta("dfs.container.ratis.ipc",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT),
+        new DeprecationDelta("dfs.container.ratis.ipc.random.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT),
+        new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT),
+        new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS),
+        new DeprecationDelta("dfs.container.ratis.log.purge.gap",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP),
+        new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT),
+        new DeprecationDelta("dfs.container.ratis.log.queue.num-elements",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS),
+        new DeprecationDelta("dfs.container.ratis.num.container.op.executors",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY),
+        new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME),
+        new DeprecationDelta("dfs.container.ratis.replication.level",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY),
+        new DeprecationDelta("dfs.container.ratis.rpc.type",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY),
+        new DeprecationDelta("dfs.container.ratis.segment.preallocated.size",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY),
+        new DeprecationDelta("dfs.container.ratis.segment.size",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY),
+        new DeprecationDelta("dfs.container.ratis.server.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT),
+        new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES),
+        new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT),
+        new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS),
+        new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration",
+            ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY),
+        new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration",
+            ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY),
+        new DeprecationDelta("dfs.ratis.snapshot.threshold",
+            ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY)
     });
   }
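Each DeprecationDelta teaches Hadoop's Configuration to resolve a legacy dfs.* name to its current replacement, so old ozone-site.xml files keep working while a deprecation warning is logged. A minimal sketch of the effect (the key constant is from this diff; the port value is illustrative):

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("dfs.container.ipc", "9859");  // legacy key from an old config
    // Reads through to the new key registered by the delta above.
    String port = conf.get(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT);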