HDDS-10630. Add missing parent directories deleted between initiate and complete MPU #6496

Merged: 7 commits, Apr 12, 2024
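For context, a rough client-side sketch of the race this PR addresses. This is illustrative only: the volume, bucket, key names, and replication settings are made up, and it assumes an FSO bucket and the OzoneBucket multipart API.

  import java.nio.charset.StandardCharsets;
  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.hdds.client.ReplicationConfig;
  import org.apache.hadoop.hdds.client.ReplicationFactor;
  import org.apache.hadoop.hdds.client.ReplicationType;
  import org.apache.hadoop.ozone.client.OzoneBucket;
  import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
  import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

  public final class MpuParentDeleteRepro {
    static void repro(OzoneBucket bucket) throws Exception {
      byte[] data = "part-1".getBytes(StandardCharsets.UTF_8);

      // 1. Initiate an MPU for a key nested under directories.
      OmMultipartInfo mpu = bucket.initiateMultipartUpload("dir1/dir2/key1",
          ReplicationConfig.fromTypeAndFactor(
              ReplicationType.RATIS, ReplicationFactor.THREE));

      OzoneOutputStream part = bucket.createMultipartKey(
          "dir1/dir2/key1", data.length, 1, mpu.getUploadID());
      part.write(data);
      part.close();

      // 2. The parent directories are deleted while the upload is in flight.
      bucket.deleteDirectory("dir1", true);

      // 3. Completing the MPU must now re-create dir1 and dir2 (and, if the
      //    open multipart key entry was removed with them, that entry too).
      Map<Integer, String> parts = new HashMap<>();
      parts.put(1, part.getCommitUploadPartInfo().getPartName());
      bucket.completeMultipartUpload("dir1/dir2/key1", mpu.getUploadID(),
          parts);
    }
  }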
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java

@@ -23,6 +23,7 @@

import java.io.IOException;
import java.nio.file.InvalidPathException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -31,6 +32,10 @@
import java.util.function.BiFunction;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.om.OzoneConfigUtil;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -41,6 +46,7 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -176,11 +182,73 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager,
volumeName, bucketName);

String ozoneKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);

String dbOzoneKey =
getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);
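// HDDS-10630: parents of this key may have been deleted after the MPU was
// initiated, so work out which directories along the path are now missing.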
List<OmDirectoryInfo> missingParentInfos;
OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest
.verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName,
keyName, Paths.get(keyName));
missingParentInfos = OMDirectoryCreateRequestWithFSO
.getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo,
pathInfoFSO, trxnLogIndex);

if (missingParentInfos != null) {
final long volumeId = omMetadataManager.getVolumeId(volumeName);
final long bucketId = omMetadataManager.getBucketId(volumeName,
bucketName);

// add all missing parents to directory table
addMissingParentsToTable(omBucketInfo, missingParentInfos,
omMetadataManager, volumeId, bucketId, trxnLogIndex);

String multipartOpenKey = omMetadataManager
.getMultipartKey(volumeId, bucketId,
pathInfoFSO.getLastKnownParentId(),
pathInfoFSO.getLeafNodeName(),
keyArgs.getMultipartUploadID());

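// The open multipart key entry may have been removed along with the
// deleted parents; if it is gone, re-create it from the request arguments.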
if (getOmKeyInfoFromOpenKeyTable(multipartOpenKey,
keyName, omMetadataManager) == null) {

final ReplicationConfig replicationConfig = OzoneConfigUtil
.resolveReplicationConfigPreference(keyArgs.getType(),
keyArgs.getFactor(), keyArgs.getEcReplicationConfig(),
omBucketInfo != null ?
omBucketInfo.getDefaultReplicationConfig() :
null, ozoneManager);

OmMultipartKeyInfo multipartKeyInfoFromArgs =
new OmMultipartKeyInfo.Builder()
.setUploadID(keyArgs.getMultipartUploadID())
.setCreationTime(keyArgs.getModificationTime())
.setReplicationConfig(replicationConfig)
.setObjectID(pathInfoFSO.getLeafNodeObjectId())
.setUpdateID(trxnLogIndex)
.setParentID(pathInfoFSO.getLastKnownParentId())
.build();

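// Rebuild a placeholder OmKeyInfo for the open key; it starts with an
// empty location group since the parts are tracked in the multipart
// info table.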
OmKeyInfo keyInfoFromArgs = new OmKeyInfo.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
.setCreationTime(keyArgs.getModificationTime())
.setModificationTime(keyArgs.getModificationTime())
.setReplicationConfig(replicationConfig)
.setOmKeyLocationInfos(Collections.singletonList(
new OmKeyLocationInfoGroup(0, new ArrayList<>(), true)))
.setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO,
ozoneManager.getPrefixManager()))
.setObjectID(pathInfoFSO.getLeafNodeObjectId())
.setUpdateID(trxnLogIndex)
.setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
.setParentObjectID(pathInfoFSO.getLastKnownParentId())
.build();

// Add missing multi part info to open key table
addMultiParttoOpenTable(omMetadataManager, multipartOpenKey,
multipartKeyInfoFromArgs, pathInfoFSO, keyInfoFromArgs,
volumeId, bucketId, trxnLogIndex);
}
}

String dbMultipartOpenKey =
getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID,
@@ -189,6 +257,12 @@
OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
.getMultipartInfoTable().get(multipartKey);

String ozoneKey = omMetadataManager.getOzoneKey(
volumeName, bucketName, keyName);

String dbOzoneKey =
getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);

// Check for directory exists with same name for the LEGACY_FS,
// if it exists throw error.
checkDirectoryAlreadyExists(ozoneManager, omBucketInfo, keyName,
@@ -464,6 +538,24 @@ protected String getDBOzoneKey(OMMetadataManager omMetadataManager,
return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
}

protected void addMissingParentsToTable(OmBucketInfo omBucketInfo,
List<OmDirectoryInfo> missingParentInfos,
OMMetadataManager omMetadataManager,
long volumeId, long bucketId, long transactionLogIndex
) throws IOException {
// FSO is disabled. Do nothing.
}

@SuppressWarnings("checkstyle:ParameterNumber")
protected void addMultiParttoOpenTable(
OMMetadataManager omMetadataManager, String multipartOpenKey,
OmMultipartKeyInfo multipartKeyInfo,
OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo,
long volumeId, long bucketId, long transactionLogIndex
) throws IOException {
// FSO is disabled. Do nothing.
}

protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
String keyName, OMMetadataManager omMetadataManager) throws IOException {
return omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java

@@ -18,12 +18,15 @@

package org.apache.hadoop.ozone.om.request.s3.multipart;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
@@ -74,6 +77,70 @@ protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
}
}

@Override
protected void addMissingParentsToTable(OmBucketInfo omBucketInfo,
List<OmDirectoryInfo> missingParentInfos,
OMMetadataManager omMetadataManager, long volumeId, long bucketId,
long transactionLogIndex) throws IOException {

// validate and update namespace for missing parent directory.
checkBucketQuotaInNamespace(omBucketInfo, missingParentInfos.size());
omBucketInfo.incrUsedNamespace(missingParentInfos.size());

// Add cache entries for the missing parent directories.
OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
volumeId, bucketId, transactionLogIndex,
missingParentInfos, null);

// Create missing parent directory entries.
try (BatchOperation batchOperation = omMetadataManager.getStore()
.initBatchOperation()) {
for (OmDirectoryInfo parentDirInfo : missingParentInfos) {
final String parentKey = omMetadataManager.getOzonePathKey(
volumeId, bucketId, parentDirInfo.getParentObjectID(),
parentDirInfo.getName());
omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
parentKey, parentDirInfo);
}

// namespace quota changes for parent directory
String bucketKey = omMetadataManager.getBucketKey(
omBucketInfo.getVolumeName(),
omBucketInfo.getBucketName());
omMetadataManager.getBucketTable().putWithBatch(batchOperation,
bucketKey, omBucketInfo);

omMetadataManager.getStore().commitBatchOperation(batchOperation);
}
}

Review comment (Contributor): I think this should be done in S3MultipartUploadCompleteResponseWithFSO instead of here; otherwise the DB entry will be added only on the leader OM.

Review comment (Contributor): I think adding to the DB batch should be handled in the response object by design. It uses an existing batch and commits only if the response is OK:

  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
    // Create parent directory entries during MultiPartFileKey create -- do
    // not wait for the file commit request.
    if (parentDirInfos != null) {
      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
        final String parentKey = omMetadataManager.getOzonePathKey(
            volumeId, bucketId, parentDirInfo.getParentObjectID(),
            parentDirInfo.getName());
        omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
            parentKey, parentDirInfo);
      }
      // namespace quota changes for parent directory
      String bucketKey = omMetadataManager.getBucketKey(
          omBucketInfo.getVolumeName(),
          omBucketInfo.getBucketName());
      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
          bucketKey, omBucketInfo);
    }
  }

@Override
protected void addMultiParttoOpenTable(
OMMetadataManager omMetadataManager, String multipartOpenKey,
OmMultipartKeyInfo multipartKeyInfo,
OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo,
long volumeId, long bucketId, long transactionLogIndex
) throws IOException {

// Add multi part to cache
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
transactionLogIndex);

// Add multi part to open key table.
try (BatchOperation batchOperation = omMetadataManager.getStore()
.initBatchOperation()) {

OMFileRequest.addToOpenFileTableForMultipart(omMetadataManager,
batchOperation,
omKeyInfo, multipartKeyInfo.getUploadID(), volumeId,
bucketId);

omMetadataManager.getStore().commitBatchOperation(batchOperation);
}
}

Review comment (Contributor): Even this should be done in S3MultipartUploadCompleteResponseWithFSO instead of here.
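A rough sketch of that response-side approach, following the reviewers' suggestion. The fields openKeyInfoToAdd, uploadId, volumeId, and bucketId are hypothetical; the request would populate them on the response object. The helper call is the same one used in the request code above.

  // In S3MultipartUploadCompleteResponseWithFSO (sketch):
  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
    // Hypothetical field, set by the request when it re-created the entry.
    if (openKeyInfoToAdd != null) {
      // Writing inside the shared batch means the entry is applied on every
      // OM that replays the transaction, not only on the Ratis leader.
      OMFileRequest.addToOpenFileTableForMultipart(omMetadataManager,
          batchOperation, openKeyInfoToAdd, uploadId, volumeId, bucketId);
    }
    // ... existing completion writes follow ...
  }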


@Override
protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey,
String keyName, OMMetadataManager omMetadataManager) throws IOException {