Skip to content

Commit

Permalink
HDDS-11696. Limit max number of entries in list keys/status response (a…
Browse files Browse the repository at this point in the history
  • Loading branch information
swamirishi authored Nov 19, 2024
1 parent e96e314 commit 2cef393
Show file tree
Hide file tree
Showing 15 changed files with 271 additions and 31 deletions.
8 changes: 8 additions & 0 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
Original file line number Diff line number Diff line change
Expand Up @@ -4556,4 +4556,12 @@
</description>
</property>

<property>
<name>ozone.om.server.list.max.size</name>
<value>1000</value>
<tag>OZONE, OM</tag>
<description>
Maximum number of entries the Ozone Manager will return in a single server-side response to list calls (list keys, list status, list snapshots). Client requests asking for more entries are capped to this value.
</description>
</property>
</configuration>
Original file line number Diff line number Diff line change
Expand Up @@ -1808,7 +1808,6 @@ private boolean getChildrenKeys(String keyPrefix, String startKey,
// 1. Get immediate children of keyPrefix, starting with startKey
List<OzoneFileStatusLight> statuses = proxy.listStatusLight(volumeName,
name, keyPrefix, false, startKey, listCacheSize, true);
boolean reachedLimitCacheSize = statuses.size() == listCacheSize;

// 2. Special case: ListKey expects keyPrefix element should present in
// the resultList, only if startKey is blank. If startKey is not blank
Expand Down Expand Up @@ -1840,7 +1839,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey,
// Return it so that the next iteration will be
// started using the stacked items.
return true;
} else if (reachedLimitCacheSize && indx == statuses.size() - 1) {
} else if (indx == statuses.size() - 1) {
// The last element is a FILE. Set the next seek key to this
// element so the next iteration resumes the listing from here.
stack.push(new ImmutablePair<>(keyPrefix, keyInfo.getKeyName()));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,19 @@ public boolean isSnapshotPath() {
return false;
}

/**
 * Extracts the snapshot name embedded in the key name of a snapshot path.
 * <p>
 * A snapshot path has a key name of the form
 * {@code <OM_SNAPSHOT_INDICATOR>/<snapshotName>/...}. The snapshot name is
 * only resolved when both the volume and bucket names are present.
 *
 * @return the snapshot name, or {@code null} if this path is not a
 *         fully-qualified snapshot path or no snapshot name component exists.
 */
public String getSnapshotName() {
  // Guard clauses: not a snapshot key, or volume/bucket context missing.
  if (!keyName.startsWith(OM_SNAPSHOT_INDICATOR)
      || bucketName.isEmpty() || volumeName.isEmpty()) {
    return null;
  }
  String[] components = keyName.split(OZONE_URI_DELIMITER);
  // components[0] is the snapshot indicator; components[1] is the name.
  if (components.length > 1) {
    return components[1];
  }
  return null;
}

/**
* If key name is not empty, the given path is a key.
* e.g. /volume1/bucket2/key3 is a key.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -625,4 +625,9 @@ private OMConfigKeys() {
public static final String OZONE_OM_MAX_BUCKET =
"ozone.om.max.buckets";
public static final int OZONE_OM_MAX_BUCKET_DEFAULT = 100000;
/**
* Configuration property that caps the server-side response size for list calls on the OM.
*/
public static final String OZONE_OM_SERVER_LIST_MAX_SIZE = "ozone.om.server.list.max.size";
public static final int OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT = 1000;
}
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
import static org.assertj.core.api.Assertions.assertThat;
Expand Down Expand Up @@ -184,7 +185,7 @@ void init() throws Exception {
conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2);

conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
conf.setBoolean(OZONE_ACL_ENABLED, true);
conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
Expand Down Expand Up @@ -2093,8 +2094,8 @@ void testListStatus2() throws IOException {
final long initialListStatusCount = omMetrics.getNumListStatus();
FileStatus[] statusList = fs.listStatus(createPath("/"));
assertEquals(1, statusList.length);
assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 1);
assertEquals(initialListStatusCount + 1, omMetrics.getNumListStatus());
assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2);
assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus());
assertEquals(fs.getFileStatus(path), statusList[0]);

dirPath = RandomStringUtils.randomAlphanumeric(5);
Expand All @@ -2105,8 +2106,8 @@ void testListStatus2() throws IOException {

statusList = fs.listStatus(createPath("/"));
assertEquals(2, statusList.length);
assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2);
assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus());
assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 4);
assertEquals(initialListStatusCount + 4, omMetrics.getNumListStatus());
for (Path p : paths) {
assertThat(Arrays.asList(statusList)).contains(fs.getFileStatus(p));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import java.util.concurrent.atomic.AtomicInteger;
Expand All @@ -49,10 +51,13 @@

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.assertj.core.api.Assertions.assertThat;
Expand Down Expand Up @@ -91,6 +96,8 @@ static void initClass() throws Exception {
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE);
conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 20);
conf.setInt(OZONE_FS_LISTING_PAGE_SIZE, 30);

// Start the cluster
cluster = MiniOzoneCluster.newHABuilder(conf)
Expand Down Expand Up @@ -289,6 +296,13 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {
String snapshotPath2 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH +
OM_KEY_PREFIX + snapshotName2;
String snapshotKeyPath2 = snapshotPath2 + OM_KEY_PREFIX + key2;
List<String> snapshotNames = new ArrayList<>();
for (int i = 0; i < cluster.getConf().getInt(OZONE_FS_LISTING_PAGE_SIZE,
OZONE_FS_LISTING_PAGE_SIZE_DEFAULT) * 2; i++) {
snapshotNames.add(createSnapshot());
}
String snapshotName3 = createSnapshot();


int res = ToolRunner.run(shell,
new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName1});
Expand All @@ -313,6 +327,10 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {

assertThat(listSnapOut).doesNotContain(snapshotName1);
assertThat(listSnapOut).contains(snapshotName2);
assertThat(listSnapOut).contains(snapshotName3);
for (String snapshotName : snapshotNames) {
assertThat(listSnapOut).contains(snapshotName);
}

// Check for snapshot keys with "ozone fs -ls"
String listSnapKeyOut = execShellCommandAndGetOutput(1,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
Expand Down Expand Up @@ -150,7 +153,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth,
FileStatus[] fileStatuses = fileSystem.listStatus(rootDir);
// verify the num of peer directories, expected span count is 1
// as it has only one dir at root.
verifyActualSpan(1, fileStatuses);
verifyActualSpan(1, Arrays.asList(fileStatuses));
for (FileStatus fileStatus : fileStatuses) {
int actualDepth =
traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span,
Expand All @@ -164,14 +167,16 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth,
int expectedFileCnt, StorageSize perFileSize)
throws IOException {
FileStatus[] fileStatuses = fs.listStatus(dirPath);
List<FileStatus> fileStatusList = new ArrayList<>();
Collections.addAll(fileStatusList, fileStatuses);
// check the num of peer directories except root and leaf as both
// has less dirs.
if (depth < expectedDepth - 1) {
verifyActualSpan(expectedSpanCnt, fileStatuses);
verifyActualSpan(expectedSpanCnt, fileStatusList);
}
int actualNumFiles = 0;
ArrayList <String> files = new ArrayList<>();
for (FileStatus fileStatus : fileStatuses) {
for (FileStatus fileStatus : fileStatusList) {
if (fileStatus.isDirectory()) {
++depth;
return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth,
Expand All @@ -192,7 +197,7 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth,
}

private int verifyActualSpan(int expectedSpanCnt,
FileStatus[] fileStatuses) {
List<FileStatus> fileStatuses) {
int actualSpan = 0;
for (FileStatus fileStatus : fileStatuses) {
if (fileStatus.isDirectory()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
import static com.google.common.collect.Lists.newLinkedList;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.junit.jupiter.params.provider.Arguments.of;
import static org.junit.jupiter.api.Assertions.assertEquals;

Expand Down Expand Up @@ -80,6 +81,7 @@ public static void init() throws Exception {
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3);
conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3);
conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@

import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.junit.jupiter.api.Assertions.assertEquals;

/**
Expand Down Expand Up @@ -81,6 +82,7 @@ public static void init() throws Exception {
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3);
conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3);
conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,8 @@

import com.google.common.collect.Lists;

import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT;
Expand Down Expand Up @@ -181,9 +183,16 @@ public class OzoneManagerRequestHandler implements RequestHandler {
LoggerFactory.getLogger(OzoneManagerRequestHandler.class);
private final OzoneManager impl;
private FaultInjector injector;
private long maxKeyListSize;


public OzoneManagerRequestHandler(OzoneManager om) {
this.impl = om;
this.maxKeyListSize = om.getConfiguration().getLong(OZONE_OM_SERVER_LIST_MAX_SIZE,
OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT);
if (this.maxKeyListSize <= 0) {
this.maxKeyListSize = OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT;
}
}

//TODO simplify it to make it shorter
Expand Down Expand Up @@ -745,7 +754,7 @@ private ListKeysResponse listKeys(ListKeysRequest request, int clientVersion)
request.getBucketName(),
request.getStartKey(),
request.getPrefix(),
request.getCount());
(int)Math.min(this.maxKeyListSize, request.getCount()));
for (OmKeyInfo key : listKeysResult.getKeys()) {
resp.addKeyInfo(key.getProtobuf(true, clientVersion));
}
Expand All @@ -763,7 +772,7 @@ private ListKeysLightResponse listKeysLight(ListKeysRequest request)
request.getBucketName(),
request.getStartKey(),
request.getPrefix(),
request.getCount());
(int)Math.min(this.maxKeyListSize, request.getCount()));
for (BasicOmKeyInfo key : listKeysLightResult.getKeys()) {
resp.addBasicKeyInfo(key.getProtobuf());
}
Expand Down Expand Up @@ -1234,7 +1243,7 @@ private ListStatusResponse listStatus(
request.hasAllowPartialPrefix() && request.getAllowPartialPrefix();
List<OzoneFileStatus> statuses =
impl.listStatus(omKeyArgs, request.getRecursive(),
request.getStartKey(), request.getNumEntries(),
request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()),
allowPartialPrefixes);
ListStatusResponse.Builder
listStatusResponseBuilder =
Expand All @@ -1260,7 +1269,7 @@ private ListStatusLightResponse listStatusLight(
request.hasAllowPartialPrefix() && request.getAllowPartialPrefix();
List<OzoneFileStatusLight> statuses =
impl.listStatusLight(omKeyArgs, request.getRecursive(),
request.getStartKey(), request.getNumEntries(),
request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()),
allowPartialPrefixes);
ListStatusLightResponse.Builder
listStatusLightResponseBuilder =
Expand Down Expand Up @@ -1488,7 +1497,7 @@ private OzoneManagerProtocolProtos.ListSnapshotResponse getSnapshots(
throws IOException {
ListSnapshotResponse implResponse = impl.listSnapshot(
request.getVolumeName(), request.getBucketName(), request.getPrefix(),
request.getPrevSnapshot(), request.getMaxListResult());
request.getPrevSnapshot(), (int)Math.min(request.getMaxListResult(), maxKeyListSize));

List<OzoneManagerProtocolProtos.SnapshotInfo> snapshotInfoList = implResponse.getSnapshotInfos()
.stream().map(SnapshotInfo::getProtobuf).collect(Collectors.toList());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,8 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException {

final OzoneManager ozoneManager = mock(OzoneManager.class);
doCallRealMethod().when(ozoneManager).checkS3MultiTenancyEnabled();

final OzoneConfiguration conf = new OzoneConfiguration();
when(ozoneManager.getConfiguration()).thenReturn(conf);
when(ozoneManager.isS3MultiTenancyEnabled()).thenReturn(false);

final String tenantId = "test-tenant";
Expand Down
Loading

0 comments on commit 2cef393

Please sign in to comment.