diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 6cb69ca77bb..30c88920520 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -62,6 +63,7 @@ import java.util.NoSuchElementException;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
 import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
@@ -620,6 +622,16 @@ public void deleteKeys(List<String> keyList) throws IOException {
     proxy.deleteKeys(volumeName, name, keyList);
   }
 
+  /**
+   * Deletes the given list of keys from the bucket.
+   * @param keyList list of key names to be deleted
+   * @param quiet if true, do not throw an exception when a delete fails;
+   *              failures are reported per key in the returned map instead
+   * @return map of key name to {@link ErrorInfo} for each key that could not be deleted
+   * @throws IOException if the delete request itself fails
+   */
+  public Map<String, ErrorInfo> deleteKeys(List<String> keyList, boolean quiet) throws IOException {
+    return proxy.deleteKeys(volumeName, name, keyList, quiet);
+  }
+
   /**
    * Rename the keyname from fromKeyName to toKeyName.
    * @param fromKeyName The original key name.
@@ -1271,25 +1283,33 @@ protected void initDelimiterKeyPrefix() {
   protected List<OzoneKey> buildKeysWithKeyPrefix(
       List<OzoneFileStatusLight> statuses) {
     return statuses.stream()
-        .map(status -> {
-          BasicOmKeyInfo keyInfo = status.getKeyInfo();
-          String keyName = keyInfo.getKeyName();
-          if (status.isDirectory()) {
-            // add trailing slash to represent directory
-            keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-          }
-          return new OzoneKey(keyInfo.getVolumeName(),
-              keyInfo.getBucketName(), keyName,
-              keyInfo.getDataSize(), keyInfo.getCreationTime(),
-              keyInfo.getModificationTime(),
-              keyInfo.getReplicationConfig(), keyInfo.isFile());
-        })
+        .map(OzoneBucket::toOzoneKey)
         .filter(key -> StringUtils.startsWith(key.getName(), getKeyPrefix()))
         .collect(Collectors.toList());
   }
 }
 
+  private static OzoneKey toOzoneKey(OzoneFileStatusLight status) {
+    BasicOmKeyInfo keyInfo = status.getKeyInfo();
+    String keyName = keyInfo.getKeyName();
+    final Map<String, String> metadata;
+    if (status.isDirectory()) {
+      // add trailing slash to represent directory
+      keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
+      metadata = Collections.emptyMap();
+    } else {
+      metadata = Collections.singletonMap(ETAG, keyInfo.getETag());
+    }
+    return new OzoneKey(keyInfo.getVolumeName(),
+        keyInfo.getBucketName(), keyName,
+        keyInfo.getDataSize(), keyInfo.getCreationTime(),
+        keyInfo.getModificationTime(),
+        keyInfo.getReplicationConfig(),
+        metadata,
+        keyInfo.isFile());
+  }
+
 /**
  * An Iterator to iterate over {@link OzoneKey} list.
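For illustration, a minimal client-side sketch of the new quiet-mode delete. The configuration, volume, bucket, and key names are assumptions for the example, not part of the patch:

```java
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.helpers.ErrorInfo;

public final class QuietDeleteExample {
  public static void main(String[] args) throws Exception {
    try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1")        // illustrative volume name
          .getBucket("bucket1");    // illustrative bucket name
      List<String> keys = Arrays.asList("a.txt", "dir1/b.txt", "no-such-key");
      // quiet=true: per-key failures come back in the map instead of an exception
      Map<String, ErrorInfo> failed = bucket.deleteKeys(keys, true);
      failed.forEach((key, err) ->
          System.out.printf("failed %s -> %s: %s%n", key, err.getCode(), err.getMessage()));
    }
  }
}
```

With `quiet=false` the call behaves like the existing `deleteKeys(List)` and surfaces the failure as an exception.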
@@ -1657,21 +1677,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey,
     for (int indx = 0; indx < statuses.size(); indx++) {
       OzoneFileStatusLight status = statuses.get(indx);
       BasicOmKeyInfo keyInfo = status.getKeyInfo();
-      String keyName = keyInfo.getKeyName();
-
-      OzoneKey ozoneKey;
-      // Add dir to the dirList
-      if (status.isDirectory()) {
-        // add trailing slash to represent directory
-        keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-      }
-      ozoneKey = new OzoneKey(keyInfo.getVolumeName(),
-          keyInfo.getBucketName(), keyName,
-          keyInfo.getDataSize(), keyInfo.getCreationTime(),
-          keyInfo.getModificationTime(),
-          keyInfo.getReplicationConfig(),
-          keyInfo.isFile());
-
+      OzoneKey ozoneKey = toOzoneKey(status);
       keysResultList.add(ozoneKey);
 
       if (status.isDirectory()) {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 91b407e631a..2a22ae305df 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
@@ -404,6 +405,18 @@ void deleteKeys(String volumeName, String bucketName,
       List<String> keyNameList) throws IOException;
 
+  /**
+   * Deletes the given list of keys from a bucket.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyNameList list of key names to be deleted
+   * @param quiet if true, do not throw an exception when a delete fails;
+   *              failures are reported per key in the returned map instead
+   * @return map of key name to error info for each key that could not be deleted
+   * @throws IOException
+   */
+  Map<String, ErrorInfo> deleteKeys(String volumeName, String bucketName,
+      List<String> keyNameList, boolean quiet)
+      throws IOException;
+
   /**
   * Renames an existing key within a bucket.
* @param volumeName Name of the Volume diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 4fda47553f5..bd95ac6dffd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -98,6 +98,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -1556,6 +1557,18 @@ public void deleteKeys( ozoneManagerClient.deleteKeys(omDeleteKeys); } + @Override + public Map deleteKeys( + String volumeName, String bucketName, List keyNameList, boolean quiet) + throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyNameList); + OmDeleteKeys omDeleteKeys = new OmDeleteKeys(volumeName, bucketName, + keyNameList); + return ozoneManagerClient.deleteKeys(omDeleteKeys, quiet); + } + @Override public void renameKey(String volumeName, String bucketName, String fromKeyName, String toKeyName) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java index 9c9a5027774..d69c14807ce 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java @@ -20,11 +20,14 @@ import java.io.IOException; import java.util.Objects; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BasicKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; + /** * Lightweight OmKeyInfo class. */ @@ -38,11 +41,12 @@ public class BasicOmKeyInfo { private long modificationTime; private ReplicationConfig replicationConfig; private boolean isFile; + private final String eTag; @SuppressWarnings("parameternumber") public BasicOmKeyInfo(String volumeName, String bucketName, String keyName, long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, boolean isFile) { + ReplicationConfig replicationConfig, boolean isFile, String eTag) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -51,6 +55,7 @@ public BasicOmKeyInfo(String volumeName, String bucketName, String keyName, this.modificationTime = modificationTime; this.replicationConfig = replicationConfig; this.isFile = isFile; + this.eTag = StringUtils.isNotEmpty(eTag) ? eTag : null; } public String getVolumeName() { @@ -85,6 +90,10 @@ public boolean isFile() { return isFile; } + public String getETag() { + return eTag; + } + /** * Builder of BasicOmKeyInfo. 
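   * <p>Illustrative builder usage with the new eTag field. Setter names other
   * than {@code setETag} and {@code setIsFile} are assumed to follow the
   * existing builder; this is a sketch, not part of the patch:
   * <pre>{@code
   * BasicOmKeyInfo info = new BasicOmKeyInfo.Builder()
   *     .setVolumeName("vol1")
   *     .setBucketName("bucket1")
   *     .setKeyName("key1")
   *     .setETag("d41d8cd98f00b204e9800998ecf8427e")  // hypothetical MD5 ETag
   *     .setIsFile(true)
   *     .build();
   * }</pre>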
*/ @@ -97,6 +106,7 @@ public static class Builder { private long modificationTime; private ReplicationConfig replicationConfig; private boolean isFile; + private String eTag; public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; @@ -138,9 +148,14 @@ public Builder setIsFile(boolean isFile) { return this; } + public Builder setETag(String etag) { + this.eTag = etag; + return this; + } + public BasicOmKeyInfo build() { return new BasicOmKeyInfo(volumeName, bucketName, keyName, dataSize, - creationTime, modificationTime, replicationConfig, isFile); + creationTime, modificationTime, replicationConfig, isFile, eTag); } } @@ -157,6 +172,9 @@ public BasicKeyInfo getProtobuf() { } else { builder.setFactor(ReplicationConfig.getLegacyFactor(replicationConfig)); } + if (StringUtils.isNotEmpty(eTag)) { + builder.setETag(eTag); + } return builder.build(); } @@ -181,6 +199,7 @@ public static BasicOmKeyInfo getFromProtobuf(BasicKeyInfo basicKeyInfo, basicKeyInfo.getType(), basicKeyInfo.getFactor(), basicKeyInfo.getEcReplicationConfig())) + .setETag(basicKeyInfo.getETag()) .setIsFile(!keyName.endsWith("/")); return builder.build(); @@ -205,6 +224,7 @@ public static BasicOmKeyInfo getFromProtobuf(String volumeName, basicKeyInfo.getType(), basicKeyInfo.getFactor(), basicKeyInfo.getEcReplicationConfig())) + .setETag(basicKeyInfo.getETag()) .setIsFile(!keyName.endsWith("/")); return builder.build(); @@ -225,6 +245,7 @@ public boolean equals(Object o) { creationTime == basicOmKeyInfo.creationTime && modificationTime == basicOmKeyInfo.modificationTime && replicationConfig.equals(basicOmKeyInfo.replicationConfig) && + Objects.equals(eTag, basicOmKeyInfo.eTag) && isFile == basicOmKeyInfo.isFile; } @@ -241,6 +262,7 @@ public static BasicOmKeyInfo fromOmKeyInfo(OmKeyInfo omKeyInfo) { omKeyInfo.getCreationTime(), omKeyInfo.getModificationTime(), omKeyInfo.getReplicationConfig(), - omKeyInfo.isFile()); + omKeyInfo.isFile(), + omKeyInfo.getMetadata().get(ETAG)); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java new file mode 100644 index 00000000000..7889a568be8 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ErrorInfo.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +/** + * Represent class which has info of error thrown for any operation. 
+ */
+public class ErrorInfo {
+  private String code;
+  private String message;
+
+  public ErrorInfo(String errorCode, String errorMessage) {
+    this.code = errorCode;
+    this.message = errorMessage;
+  }
+
+  public String getCode() {
+    return code;
+  }
+
+  public void setCode(String errorCode) {
+    this.code = errorCode;
+  }
+
+  public String getMessage() {
+    return message;
+  }
+
+  public void setMessage(String errorMessage) {
+    this.message = errorMessage;
+  }
+
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index ad394bf4d1d..306d32eb396 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -21,6 +21,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import javax.annotation.Nonnull;
@@ -32,6 +33,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.DBUpdates;
 import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
 import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -359,6 +361,21 @@ default void deleteKeys(OmDeleteKeys deleteKeys) throws IOException {
         "this to be implemented, as write requests use a new approach.");
   }
 
+  /**
+   * Deletes one or more existing keys. Supports deleting a single key as well
+   * as a batch of keys; used when deleting files through OzoneFileSystem.
+   *
+   * @param deleteKeys keys to delete
+   * @param quiet if true, do not throw an exception when a delete fails;
+   *              failures are reported per key in the returned map instead
+   * @return map of key name to error info for each key that could not be deleted
+   * @throws IOException
+   */
+  default Map<String, ErrorInfo> deleteKeys(OmDeleteKeys deleteKeys, boolean quiet)
+      throws IOException {
+    throw new UnsupportedOperationException("OzoneManager does not require " +
+        "this to be implemented, as write requests use a new approach.");
+  }
+
   /**
   * Deletes an existing empty bucket from volume.
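A usage note on the returned map: as the S3 multi-delete endpoint later in this patch does, callers will typically treat KEY_NOT_FOUND as a successful delete. A minimal sketch of that filtering; `QuietDeleteResults` is a hypothetical helper, not part of the patch:

```java
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.ErrorInfo;

final class QuietDeleteResults {  // hypothetical helper class
  private QuietDeleteResults() { }

  /** Drop KEY_NOT_FOUND entries: a missing key counts as already deleted. */
  static Map<String, ErrorInfo> realFailures(Map<String, ErrorInfo> quietResult) {
    return quietResult.entrySet().stream()
        .filter(e -> !ResultCodes.KEY_NOT_FOUND.name().equals(e.getValue().getCode()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
  }
}
```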
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index e1d6aca863c..809c1daab78 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.time.Instant;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -41,6 +42,7 @@ import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
 import org.apache.hadoop.ozone.om.helpers.ListKeysLightResult;
 import org.apache.hadoop.ozone.om.helpers.ListKeysResult;
 import org.apache.hadoop.ozone.om.helpers.DBUpdates;
@@ -943,6 +945,12 @@ public void deleteKey(OmKeyArgs args) throws IOException {
    */
   @Override
   public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException {
+    deleteKeys(deleteKeys, false);
+  }
+
+  @Override
+  public Map<String, ErrorInfo> deleteKeys(OmDeleteKeys deleteKeys, boolean quiet)
+      throws IOException {
     DeleteKeysRequest.Builder req = DeleteKeysRequest.newBuilder();
     DeleteKeyArgs deletedKeys = DeleteKeyArgs.newBuilder()
         .setBucketName(deleteKeys.getBucket())
@@ -952,9 +960,20 @@ public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException {
     OMRequest omRequest = createOMRequest(Type.DeleteKeys)
         .setDeleteKeysRequest(req)
         .build();
+    OMResponse omResponse = submitRequest(omRequest);
 
-    handleError(submitRequest(omRequest));
-
+    Map<String, ErrorInfo> keyToErrors = new HashMap<>();
+    if (quiet) {
+      List<OzoneManagerProtocolProtos.DeleteKeyError> errors =
+          omResponse.getDeleteKeysResponse().getErrorsList();
+      for (OzoneManagerProtocolProtos.DeleteKeyError deleteKeyError : errors) {
+        keyToErrors.put(deleteKeyError.getKey(),
+            new ErrorInfo(deleteKeyError.getErrorCode(), deleteKeyError.getErrorMsg()));
+      }
+    } else {
+      handleError(omResponse);
+    }
+    return keyToErrors;
   }
 
   /**
diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh
index b81acf98993..134c8f53c6e 100644
--- a/hadoop-ozone/dev-support/checks/_lib.sh
+++ b/hadoop-ozone/dev-support/checks/_lib.sh
@@ -149,3 +149,18 @@ install_spotbugs() {
 _install_spotbugs() {
   curl -LSs https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/3.1.12/spotbugs-3.1.12.tgz | tar -xz -f -
 }
+
+download_hadoop_aws() {
+  local dir="$1"
+
+  if [[ -z ${dir} ]]; then
+    echo "Required argument: target directory for Hadoop AWS sources" >&2
+    return 1
+  fi
+
+  if [[ ! -e "${dir}" ]] || [[ !
-d "${dir}"/src/test/resources ]]; then + mkdir -p "${dir}" + [[ -f "${dir}.tar.gz" ]] || curl -LSs -o "${dir}.tar.gz" https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz + tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1 + fi +} diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 5139dddcd8c..e6059cd8256 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -81,8 +81,8 @@ for failed_test in $(< ${REPORT_DIR}/summary.txt); do \( -name "${failed_test}.txt" -or -name "${failed_test}-output.txt" -or -name "TEST-${failed_test}.xml" \)); do dir=$(dirname "${file}") dest_dir=$(_realpath --relative-to="${PWD}" "${dir}/../..") || continue - mkdir -p "${REPORT_DIR}/${dest_dir}" - mv "${file}" "${REPORT_DIR}/${dest_dir}"/ + mkdir -pv "${REPORT_DIR}/${dest_dir}" + mv -v "${file}" "${REPORT_DIR}/${dest_dir}"/ done done diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index 0489fa24384..5be3f7b5879 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -19,15 +19,20 @@ set -u -o pipefail DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 -source "${DIR}/_lib.sh" +OZONE_ROOT=$(pwd -P) + +: ${HADOOP_AWS_DIR:=""} +: ${OZONE_ACCEPTANCE_SUITE:=""} +: ${OZONE_TEST_SELECTOR:=""} +: ${OZONE_ACCEPTANCE_TEST_TYPE:="robot"} +: ${OZONE_WITH_COVERAGE:="false"} -install_virtualenv -install_robot +source "${DIR}/_lib.sh" -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/acceptance"} +REPORT_DIR=${OUTPUT_DIR:-"${OZONE_ROOT}/target/acceptance"} OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout) -DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" +DIST_DIR="${OZONE_ROOT}/hadoop-ozone/dist/target/ozone-$OZONE_VERSION" if [ ! -d "$DIST_DIR" ]; then echo "Distribution dir is missing. Doing a full build" @@ -36,15 +41,42 @@ fi mkdir -p "$REPORT_DIR" -export OZONE_ACCEPTANCE_SUITE +if [[ "${OZONE_ACCEPTANCE_SUITE}" == "s3a" ]]; then + OZONE_ACCEPTANCE_TEST_TYPE="maven" + + if [[ -z "${HADOOP_AWS_DIR}" ]]; then + HADOOP_VERSION=$(mvn help:evaluate -Dexpression=hadoop.version -q -DforceStdout) + export HADOOP_AWS_DIR=${OZONE_ROOT}/target/hadoop-src + fi + + download_hadoop_aws "${HADOOP_AWS_DIR}" +fi + +if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "robot" ]]; then + install_virtualenv + install_robot +fi + +export OZONE_ACCEPTANCE_SUITE OZONE_ACCEPTANCE_TEST_TYPE cd "$DIST_DIR/compose" || exit 1 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log" RES=$? -cp -rv result/* "$REPORT_DIR/" -cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" -find "$REPORT_DIR" -type f -empty -print0 | xargs -0 rm -v -grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt" +if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then + pushd result + source "${DIR}/_mvn_unit_report.sh" + find . 
-name junit -print0 | xargs -r -0 rm -frv
+  cp -rv * "${REPORT_DIR}"/
+  popd
+else
+  cp -rv result/* "$REPORT_DIR/"
+  if [[ -f "${REPORT_DIR}/log.html" ]]; then
+    cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html"
+  fi
+  grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt"
+fi
+
+find "$REPORT_DIR" -type f -empty -not -name summary.txt -print0 | xargs -0 rm -v
 
 exit $RES
diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
new file mode 100644
index 00000000000..554b22b5a39
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh
@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs S3A contract tests against various bucket types on
+# a Docker Compose-based Ozone cluster.
+# Requires HADOOP_AWS_DIR to point to the directory containing hadoop-aws sources.
+
+if [[ -z ${HADOOP_AWS_DIR} ]] || [[ ! -e ${HADOOP_AWS_DIR} ]]; then
+  echo "Skipping S3A tests due to missing HADOOP_AWS_DIR (directory with hadoop-aws sources)" >&2
+  exit
+fi
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+## @description  Run S3A contract tests against Ozone.
+## @param        Ozone S3 bucket
+execute_s3a_tests() {
+  local bucket="$1"
+
+  pushd "${HADOOP_AWS_DIR}"
+
+  # S3A contract tests are enabled by presence of `auth-keys.xml`.
+  # https://hadoop.apache.org/docs/r3.3.6/hadoop-aws/tools/hadoop-aws/testing.html#Setting_up_the_tests
+  cat > src/test/resources/auth-keys.xml <<-EOF
+  <configuration>
+
+    <property>
+      <name>fs.s3a.endpoint</name>
+      <value>http://localhost:9878</value>
+    </property>
+
+    <property>
+      <name>test.fs.s3a.endpoint</name>
+      <value>http://localhost:9878</value>
+    </property>
+
+    <property>
+      <name>fs.contract.test.fs.s3a</name>
+      <value>s3a://${bucket}/</value>
+    </property>
+
+    <property>
+      <name>test.fs.s3a.name</name>
+      <value>s3a://${bucket}/</value>
+    </property>
+
+    <property>
+      <name>fs.s3a.access.key</name>
+      <value>${AWS_ACCESS_KEY_ID}</value>
+    </property>
+
+    <property>
+      <name>fs.s3a.secret.key</name>
+      <value>${AWS_SECRET_ACCESS_KEY}</value>
+    </property>
+
+    <property>
+      <name>test.fs.s3a.sts.enabled</name>
+      <value>false</value>
+    </property>
+
+    <property>
+      <name>fs.s3a.committer.staging.conflict-mode</name>
+      <value>replace</value>
+    </property>
+
+    <property>
+      <name>fs.s3a.path.style.access</name>
+      <value>true</value>
+    </property>
+
+    <property>
+      <name>fs.s3a.directory.marker.retention</name>
+      <value>keep</value>
+    </property>
+
+  </configuration>
+EOF
+
+  # Some tests are skipped due to known issues.
+ # - ITestS3AContractDistCp: HDDS-10616 + # - ITestS3AContractGetFileStatusV1List: HDDS-10617 + # - ITestS3AContractRename: HDDS-10665 + mvn -B -V --fail-never --no-transfer-progress \ + -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \ + clean test + + local target="${RESULT_DIR}/junit/${bucket}/target" + mkdir -p "${target}" + mv -iv target/surefire-reports "${target}"/ + popd +} + +start_docker_env + +if [[ ${SECURITY_ENABLED} == "true" ]]; then + execute_command_in_container s3g kinit -kt /etc/security/keytabs/testuser.keytab "testuser/s3g@EXAMPLE.COM" + access=$(execute_command_in_container s3g ozone s3 getsecret -e) + eval "$access" +else + export AWS_ACCESS_KEY_ID="s3a-contract" + export AWS_SECRET_ACCESS_KEY="unsecure" +fi + +execute_command_in_container s3g ozone sh bucket create --layout OBJECT_STORE /s3v/obs-bucket +execute_command_in_container s3g ozone sh bucket create --layout LEGACY /s3v/leg-bucket +execute_command_in_container s3g ozone sh bucket create --layout FILE_SYSTEM_OPTIMIZED /s3v/fso-bucket + +for bucket in obs-bucket leg-bucket fso-bucket; do + execute_s3a_tests "$bucket" +done diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh index 1361a4c0c33..976e490d32c 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh @@ -38,9 +38,11 @@ execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ## Exclude virtual-host tests. This is tested separately as it requires additional config. exclude="--exclude virtual-host" for bucket in generated; do - execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 - # some tests are independent of the bucket type, only need to be run once - exclude="--exclude virtual-host --exclude no-bucket-type" + for layout in OBJECT_STORE LEGACY FILE_SYSTEM_OPTIMIZED; do + execute_robot_test ${SCM} -v BUCKET:${bucket} -v BUCKET_LAYOUT:${layout} -N s3-${layout}-${bucket} ${exclude} s3 + # some tests are independent of the bucket type, only need to be run once + exclude="--exclude virtual-host --exclude no-bucket-type" + done done execute_robot_test ${SCM} freon diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh b/hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh new file mode 100644 index 00000000000..c277e71a4bf --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone/test-s3a.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
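+# Note: the "#suite:s3a" marker below is what groups this script into the s3a
+# acceptance suite; running the checks with OZONE_ACCEPTANCE_SUITE=s3a (see the
+# new branch added to dev-support/checks/acceptance.sh above) selects scripts
+# tagged this way.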
+ +#suite:s3a + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=false + +source "$COMPOSE_DIR/../common/s3a-test.sh" diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh new file mode 100644 index 00000000000..78b8b51d9d8 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3a.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:s3a + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=true +export OM_SERVICE_ID="omservice" +export SCM=scm1.org + +source "$COMPOSE_DIR/../common/s3a-test.sh" diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index a998690032a..85294b6b793 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -27,6 +27,7 @@ rm "$ALL_RESULT_DIR"/* || true source "$SCRIPT_DIR"/testlib.sh +: ${OZONE_ACCEPTANCE_TEST_TYPE:="robot"} : ${OZONE_WITH_COVERAGE:="false"} if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then @@ -46,7 +47,9 @@ if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then cp /tmp/jacoco-combined.exec "$SCRIPT_DIR"/result fi -generate_report "acceptance" "${ALL_RESULT_DIR}" "${XUNIT_RESULT_DIR}" - +if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "robot" ]]; then + # does not apply to JUnit tests run via Maven + generate_report "acceptance" "${ALL_RESULT_DIR}" "${XUNIT_RESULT_DIR}" +fi exit $RESULT diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 505cb1ae77c..11db3879bb1 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -429,12 +429,12 @@ copy_results() { target_dir="${target_dir}/${test_script_name}" fi - if [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then + if command -v rebot > /dev/null 2>&1 && [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then rebot --nostatusrc -N "${test_name}" -l NONE -r NONE -o "${all_result_dir}/${test_name}.xml" "${result_dir}"/*.xml \ && rm -fv "${result_dir}"/*.xml "${result_dir}"/log.html "${result_dir}"/report.html fi - mkdir -p "${target_dir}" + mkdir -pv "${target_dir}" mv -v "${result_dir}"/* "${target_dir}"/ } diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot index d1a6c5c7d00..685f57fd2bb 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot @@ -167,6 +167,7 @@ Test key 
handling Should Not Contain ${result} NOTICE.txt.1 exists ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | jq -r '. | select(.name=="key1")' Should contain ${result} creationTime + Should not contain ${result} ETag ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | jq -r '.[] | select(.name=="key1") | .name' Should Be Equal ${result} key1 Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 840fb963d8d..b20537014dd 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -23,6 +23,7 @@ ${ENDPOINT_URL} http://s3g:9878 ${OZONE_S3_HEADER_VERSION} v4 ${OZONE_S3_SET_CREDENTIALS} true ${BUCKET} generated +${BUCKET_LAYOUT} OBJECT_STORE ${KEY_NAME} key1 ${OZONE_S3_TESTS_SET_UP} ${FALSE} ${OZONE_AWS_ACCESS_KEY_ID} ${EMPTY} @@ -127,16 +128,12 @@ Create bucket with name ${result} = Execute AWSS3APICli create-bucket --bucket ${bucket} Should contain ${result} Location Should contain ${result} ${bucket} -Create legacy bucket - ${postfix} = Generate Ozone String - ${legacy_bucket} = Set Variable legacy-bucket-${postfix} - ${result} = Execute and checkrc ozone sh bucket create -l LEGACY s3v/${legacy_bucket} 0 - [Return] ${legacy_bucket} -Create obs bucket +Create bucket with layout + [Arguments] ${layout} ${postfix} = Generate Ozone String - ${bucket} = Set Variable obs-bucket-${postfix} - ${result} = Execute and checkrc ozone sh bucket create -l OBJECT_STORE s3v/${bucket} 0 + ${bucket} = Set Variable bucket-${postfix} + ${result} = Execute ozone sh bucket create --layout ${layout} s3v/${bucket} [Return] ${bucket} Setup s3 tests @@ -144,7 +141,7 @@ Setup s3 tests Run Keyword Generate random prefix Run Keyword Install aws cli Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers - Run Keyword if '${BUCKET}' == 'generated' Create generated bucket + Run Keyword if '${BUCKET}' == 'generated' Create generated bucket ${BUCKET_LAYOUT} Run Keyword if '${BUCKET}' == 'link' Setup links for S3 tests Run Keyword if '${BUCKET}' == 'encrypted' Create encrypted bucket Run Keyword if '${BUCKET}' == 'erasure' Create EC bucket @@ -154,18 +151,19 @@ Setup links for S3 tests ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/link Return From Keyword If ${exists} Execute ozone sh volume create o3://${OM_SERVICE_ID}/legacy - Execute ozone sh bucket create o3://${OM_SERVICE_ID}/legacy/source-bucket + Execute ozone sh bucket create --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/legacy/source-bucket Create link link Create generated bucket - ${BUCKET} = Create bucket + [Arguments] ${layout}=OBJECT_STORE + ${BUCKET} = Create bucket with layout ${layout} Set Global Variable ${BUCKET} Create encrypted bucket Return From Keyword if '${SECURITY_ENABLED}' == 'false' ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/encrypted Return From Keyword If ${exists} - Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/s3v/encrypted + Execute ozone sh bucket create -k ${KEY_NAME} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted Create link [arguments] ${bucket} @@ -175,7 +173,7 @@ Create link Create EC bucket ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/erasure Return From Keyword If ${exists} - Execute ozone sh bucket create --replication rs-3-2-1024k --type EC o3://${OM_SERVICE_ID}/s3v/erasure 
+ Execute ozone sh bucket create --replication rs-3-2-1024k --type EC --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/erasure Generate random prefix ${random} = Generate Ozone String diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot index be0582edd1f..66f3461b01d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot @@ -40,22 +40,23 @@ Head object in non existing bucket ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET}-non-existent --key ${PREFIX}/headobject/key=value/f1 255 Should contain ${result} 404 Should contain ${result} Not Found + Head object where path is a directory - ${legacy-bucket} = Create legacy bucket - ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${legacy-bucket} --key ${PREFIX}/headobject/keyvalue/f1 --body /tmp/testfile 0 - ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${legacy-bucket} --key ${PREFIX}/headobject/keyvalue/ 255 + Pass Execution If '${BUCKET_LAYOUT}' == 'FILE_SYSTEM_OPTIMIZED' does not apply to FSO buckets + ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/headobject/keyvalue/f1 --body /tmp/testfile 0 + ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/headobject/keyvalue/ 255 Should contain ${result} 404 Should contain ${result} Not Found Head directory objects - ${obs-bucket} = Create obs bucket - ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${obs-bucket} --key ${PREFIX}/mydir/ --body /tmp/testfile 0 - ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${obs-bucket} --key ${PREFIX}/mydir 255 + Pass Execution If '${BUCKET_LAYOUT}' == 'FILE_SYSTEM_OPTIMIZED' does not apply to FSO buckets + ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/mydir/ --body /tmp/testfile 0 + ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/mydir 255 Should contain ${result} 404 Should contain ${result} Not Found - ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${obs-bucket} --key ${PREFIX}/mydir/ 0 + ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/mydir/ 0 Head non existing key ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/non-existent 255 Should contain ${result} 404 - Should contain ${result} Not Found \ No newline at end of file + Should contain ${result} Not Found diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 5d340e53324..bbff89e71f8 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -44,6 +44,8 @@ Put object to s3 Get object from s3 ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile.result Compare files /tmp/testfile /tmp/testfile.result + ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte /tmp/zerobyte.result + Compare files /tmp/zerobyte /tmp/zerobyte.result #This test depends on the previous test case. 
Can't be executed alone Get object with wrong signature @@ -151,34 +153,14 @@ Incorrect values for end and start offset Should Be Equal ${expectedData} ${actualData} Zero byte file - ${result} = Execute ozone sh bucket info /s3v/${BUCKET} - ${linked} = Execute echo '${result}' | jq -j '.sourceVolume,"/",.sourceBucket' - ${eval} = Evaluate "source" in """${linked}""" - IF ${eval} == ${True} - ${result} = Execute ozone sh bucket info ${linked} - END - ${fsolayout} = Evaluate "OPTIMIZED" in """${result}""" - ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-0 /tmp/testfile2.result 255 - IF ${fsolayout} == ${True} - Should contain ${result} NoSuchKey - ELSE Should contain ${result} InvalidRange - END ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-1 /tmp/testfile2.result 255 - IF ${fsolayout} == ${True} - Should contain ${result} NoSuchKey - ELSE Should contain ${result} InvalidRange - END ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-10000 /tmp/testfile2.result 255 - IF ${fsolayout} == ${True} - Should contain ${result} NoSuchKey - ELSE Should contain ${result} InvalidRange - END Create file with user defined metadata Execute echo "Randomtext" > /tmp/testfile2 @@ -258,4 +240,4 @@ Create key twice with different content and expect different ETags # clean up Execute AWSS3Cli rm s3://${BUCKET}/test_key_to_check_etag_differences Execute rm -rf /tmp/file1 - Execute rm -rf /tmp/file2 \ No newline at end of file + Execute rm -rf /tmp/file2 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index 70dcfa1abed..e9b5dd5df72 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -45,13 +45,15 @@ S3 Gateway Secret Already Exists Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username + [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser Should contain ${result} HTTP/1.1 200 OK ignore_case=True Should Match Regexp ${result} .*.* S3 Gateway Generate Secret By Username For Other User + [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* \ No newline at end of file + Should Match Regexp ${result} .*.* diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index 0f15f23067b..59725c0416c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -38,13 +38,15 @@ S3 Gateway Revoke Secret Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username + [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. 
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username For Other User + [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - Should contain ${result} HTTP/1.1 200 OK ignore_case=True \ No newline at end of file + Should contain ${result} HTTP/1.1 200 OK ignore_case=True diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 8b931c49c96..8a20e0a0efe 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1113,6 +1113,7 @@ message BasicKeyInfo { optional hadoop.hdds.ReplicationType type = 5; optional hadoop.hdds.ReplicationFactor factor = 6; optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 7; + optional string eTag = 8; } message DirectoryInfo { @@ -1288,9 +1289,16 @@ message DeleteKeyArgs { repeated string keys = 3; } +message DeleteKeyError { + optional string key = 1; + optional string errorCode = 2; + optional string errorMsg = 3; +} + message DeleteKeysResponse { optional DeleteKeyArgs unDeletedKeys = 1; optional bool status = 2; + repeated DeleteKeyError errors = 3; } message DeleteKeyResponse { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index c0872db0fd6..78e67bb8ed5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -32,6 +32,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .UserInfo; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; + /** * Interface for OM Requests to convert to audit objects. 
*/ @@ -80,6 +82,11 @@ default Map buildKeyArgsAuditMap(KeyArgs keyArgs) { auditMap.put(OzoneConsts.REPLICATION_CONFIG, ECReplicationConfig.toString(keyArgs.getEcReplicationConfig())); } + for (HddsProtos.KeyValue item : keyArgs.getMetadataList()) { + if (ETAG.equals(item.getKey())) { + auditMap.put(ETAG, item.getValue()); + } + } return auditMap; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index aa99ac8afec..5213df0fdec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -54,6 +55,7 @@ import java.io.IOException; import java.nio.file.InvalidPathException; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -92,6 +94,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Exception exception = null; OMClientResponse omClientResponse = null; Result result = null; + Map keyToError = new HashMap<>(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumKeyDeletes(); @@ -147,6 +150,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, objectKey); deleteKeys.remove(keyName); unDeletedKeys.addKeys(keyName); + keyToError.put(keyName, new ErrorInfo(OMException.ResultCodes.KEY_NOT_FOUND.name(), "Key does not exist")); continue; } @@ -164,6 +168,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.error("Acl check failed for Key: {}", objectKey, ex); deleteKeys.remove(keyName); unDeletedKeys.addKeys(keyName); + keyToError.put(keyName, new ErrorInfo(OMException.ResultCodes.ACCESS_DENIED.name(), "ACL check failed")); } } @@ -181,7 +186,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, final long volumeId = omMetadataManager.getVolumeId(volumeName); omClientResponse = getOmClientResponse(ozoneManager, omKeyInfoList, dirList, omResponse, - unDeletedKeys, deleteStatus, omBucketInfo, volumeId); + unDeletedKeys, keyToError, deleteStatus, omBucketInfo, volumeId); result = Result.SUCCESS; @@ -195,6 +200,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Add all keys which are failed due to any other exception . 
for (int i = indexFailed; i < length; i++) { unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i)); + keyToError.put(deleteKeyArgs.getKeys(i), new ErrorInfo(OMException.ResultCodes.INTERNAL_ERROR.name(), + ex.getMessage())); } omResponse.setDeleteKeysResponse( @@ -256,12 +263,18 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, List omKeyInfoList, List dirList, OMResponse.Builder omResponse, OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys, + Map keyToErrors, boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId) { OMClientResponse omClientResponse; + List deleteKeyErrors = new ArrayList<>(); + for (Map.Entry key : keyToErrors.entrySet()) { + deleteKeyErrors.add(OzoneManagerProtocolProtos.DeleteKeyError.newBuilder().setKey(key.getKey()) + .setErrorCode(key.getValue().getCode()).setErrorMsg(key.getValue().getMessage()).build()); + } omClientResponse = new OMKeysDeleteResponse(omResponse .setDeleteKeysResponse( DeleteKeysResponse.newBuilder().setStatus(deleteStatus) - .setUnDeletedKeys(unDeletedKeys)) + .setUnDeletedKeys(unDeletedKeys).addAllErrors(deleteKeyErrors)) .setStatus(deleteStatus ? OK : PARTIAL_DELETE).setSuccess(deleteStatus) .build(), omKeyInfoList, ozoneManager.isRatisEnabled(), omBucketInfo.copyObject()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java index 7dd6798f0f4..9e7703ec054 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java @@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -34,7 +35,9 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.ArrayList; import java.util.List; +import java.util.Map; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; @@ -134,12 +137,19 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, List omKeyInfoList, List dirList, OzoneManagerProtocolProtos.OMResponse.Builder omResponse, OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys, + Map keyToErrors, boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId) { OMClientResponse omClientResponse; + List deleteKeyErrors = new ArrayList<>(); + for (Map.Entry key : keyToErrors.entrySet()) { + deleteKeyErrors.add(OzoneManagerProtocolProtos.DeleteKeyError.newBuilder() + .setKey(key.getKey()).setErrorCode(key.getValue().getCode()) + .setErrorMsg(key.getValue().getMessage()).build()); + } omClientResponse = new OMKeysDeleteResponseWithFSO(omResponse .setDeleteKeysResponse( OzoneManagerProtocolProtos.DeleteKeysResponse.newBuilder() - .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) + .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys).addAllErrors(deleteKeyErrors)) .setStatus(deleteStatus ? 
OK : PARTIAL_DELETE).setSuccess(deleteStatus) .build(), omKeyInfoList, dirList, ozoneManager.isRatisEnabled(), omBucketInfo.copyObject(), volumeId); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index 91801e1d519..2075bbc8de6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyError; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.junit.jupiter.api.Assertions; @@ -70,6 +71,9 @@ protected void checkDeleteKeysResponse( .getUnDeletedKeys(); Assertions.assertEquals(0, unDeletedKeys.getKeysCount()); + List keyErrors = omClientResponse.getOMResponse().getDeleteKeysResponse() + .getErrorsList(); + Assertions.assertEquals(0, keyErrors.size()); // Check all keys are deleted. for (String deleteKey : deleteKeyList) { @@ -120,6 +124,9 @@ protected void checkDeleteKeysResponseForFailure( .getDeleteKeysResponse().getUnDeletedKeys(); Assertions.assertEquals(1, unDeletedKeys.getKeysCount()); + List keyErrors = omClientResponse.getOMResponse().getDeleteKeysResponse() + .getErrorsList(); + Assertions.assertEquals(1, keyErrors.size()); Assertions.assertEquals("dummy", unDeletedKeys.getKeys(0)); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index b8cd56d5f95..6f4a4d20f36 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata; @@ -67,8 +68,10 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Set; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -444,47 +447,48 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName, OzoneBucket bucket = getBucket(bucketName); MultiDeleteResponse result = new MultiDeleteResponse(); + List deleteKeys = new ArrayList<>(); + if (request.getObjects() != null) { + Map undeletedKeyResultMap; for (DeleteObject keyToDelete : 
request.getObjects()) { - long startNanos = Time.monotonicNowNanos(); - try { - bucket.deleteKey(keyToDelete.getKey()); - getMetrics().updateDeleteKeySuccessStats(startNanos); - - if (!request.isQuiet()) { - result.addDeleted(new DeletedObject(keyToDelete.getKey())); - } - } catch (OMException ex) { - if (isAccessDenied(ex)) { - getMetrics().updateDeleteKeyFailureStats(startNanos); - result.addError( - new Error(keyToDelete.getKey(), "PermissionDenied", - ex.getMessage())); - } else if (ex.getResult() != ResultCodes.KEY_NOT_FOUND) { - getMetrics().updateDeleteKeyFailureStats(startNanos); - result.addError( - new Error(keyToDelete.getKey(), "InternalError", - ex.getMessage())); - } else { + deleteKeys.add(keyToDelete.getKey()); + } + long startNanos = Time.monotonicNowNanos(); + try { + undeletedKeyResultMap = bucket.deleteKeys(deleteKeys, true); + for (DeleteObject d : request.getObjects()) { + ErrorInfo error = undeletedKeyResultMap.get(d.getKey()); + boolean deleted = error == null || + // if the key is not found, it is assumed to be successfully deleted + ResultCodes.KEY_NOT_FOUND.name().equals(error.getCode()); + if (deleted) { + deleteKeys.remove(d.getKey()); if (!request.isQuiet()) { - result.addDeleted(new DeletedObject(keyToDelete.getKey())); + result.addDeleted(new DeletedObject(d.getKey())); } - getMetrics().updateDeleteKeySuccessStats(startNanos); + } else { + result.addError(new Error(d.getKey(), error.getCode(), error.getMessage())); } - } catch (Exception ex) { - getMetrics().updateDeleteKeyFailureStats(startNanos); - result.addError( - new Error(keyToDelete.getKey(), "InternalError", - ex.getMessage())); } + getMetrics().updateDeleteKeySuccessStats(startNanos); + } catch (IOException ex) { + LOG.error("Delete key failed: {}", ex.getMessage()); + getMetrics().updateDeleteKeyFailureStats(startNanos); + result.addError( + new Error("ALL", "InternalError", + ex.getMessage())); } } + + Map auditMap = getAuditParameters(); + auditMap.put("failedDeletes", deleteKeys.toString()); if (result.getErrors().size() != 0) { AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, - getAuditParameters(), new Exception("MultiDelete Exception"))); + auditMap, new Exception("MultiDelete Exception"))); } else { AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + buildAuditMessageForSuccess(s3GAction, auditMap)); } return result; } @@ -714,7 +718,10 @@ private void addKey(ListObjectResponse response, OzoneKey next) { keyMetadata.setKey(EncodingTypeObject.createNullable(next.getName(), response.getEncodingType())); keyMetadata.setSize(next.getDataSize()); - keyMetadata.setETag("" + next.getModificationTime()); + String eTag = next.getMetadata().get(ETAG); + if (eTag != null) { + keyMetadata.setETag(ObjectEndpoint.wrapInQuotes(eTag)); + } if (next.getReplicationType().toString().equals(ReplicationType .STAND_ALONE.toString())) { keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString()); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 44b53f5bd87..b77f6b733eb 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -275,7 +275,9 @@ public Response put( boolean hasAmzDecodedLengthZero = amzDecodedLength != null && 
Long.parseLong(amzDecodedLength) == 0; if (canCreateDirectory && - (length == 0 || hasAmzDecodedLengthZero)) { + (length == 0 || hasAmzDecodedLengthZero) && + StringUtils.endsWith(keyPath, "/") + ) { s3GAction = S3GAction.CREATE_DIRECTORY; getClientProtocol() .createDirectory(volume.getName(), bucketName, keyPath); @@ -600,7 +602,7 @@ public Response head( // Should not return ETag header if the ETag is not set // doing so will result in "null" string being returned instead // which breaks some AWS SDK implementation - response.header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG))); + response.header(ETAG, wrapInQuotes(key.getMetadata().get(ETAG))); } addLastModifiedDate(response, key); @@ -1346,7 +1348,7 @@ public boolean isDatastreamEnabled() { return datastreamEnabled; } - private String wrapInQuotes(String value) { + static String wrapInQuotes(String value) { return "\"" + value + "\""; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index a86a92820c0..7448bda3001 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -33,6 +33,7 @@ import java.io.IOException; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; +import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; import static javax.ws.rs.core.Response.Status.NOT_FOUND; /** @@ -53,7 +54,8 @@ public Response generate() throws IOException { @Path("/{username}") public Response generate(@PathParam("username") String username) throws IOException { - return generateInternal(username); + // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. + return Response.status(METHOD_NOT_ALLOWED).build(); } private Response generateInternal(@Nullable String username) throws IOException { @@ -93,7 +95,8 @@ public Response revoke() throws IOException { @Path("/{username}") public Response revoke(@PathParam("username") String username) throws IOException { - return revokeInternal(username); + // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. 
+ return Response.status(METHOD_NOT_ALLOWED).build(); } private Response revokeInternal(@Nullable String username) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 8bf0a882186..f9b659cd1ca 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; @@ -57,6 +58,7 @@ import java.io.IOException; import java.net.URI; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -250,6 +252,13 @@ public void deleteKeys(String volumeName, String bucketName, } + @Override + public Map<String, ErrorInfo> deleteKeys(String volumeName, String bucketName, + List<String> keyNameList, boolean quiet) + throws IOException { + return new HashMap<>(); + } + @Override public void renameKey(String volumeName, String bucketName, String fromKeyName, String toKeyName) @@ -491,7 +500,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { - + getBucket(volumeName, bucketName).createDirectory(keyName); } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 1e28c310b85..a886e6cfad7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -37,12 +37,11 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.ozone.OzoneAcl; @@ -53,9 +52,12 @@ import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts.PartInfo; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; @@ -64,7 +66,9 @@ /** * In-memory ozone bucket
for testing. */ -public class OzoneBucketStub extends OzoneBucket { +public final class OzoneBucketStub extends OzoneBucket { + + private static final Logger LOG = LoggerFactory.getLogger(OzoneBucketStub.class); private Map<String, OzoneKeyDetails> keyDetails = new HashMap<>(); @@ -81,7 +85,7 @@ public static Builder newBuilder() { return new Builder(); } - public OzoneBucketStub(Builder b) { + private OzoneBucketStub(Builder b) { super(b); this.replicationConfig = super.getReplicationConfig(); } @@ -94,43 +98,6 @@ public static final class Builder extends OzoneBucket.Builder { private Builder() { } - @Override - public Builder setVolumeName(String volumeName) { - super.setVolumeName(volumeName); - return this; - } - - @Override - public Builder setName(String name) { - super.setName(name); - return this; - } - - @Override - public Builder setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - super.setDefaultReplicationConfig(defaultReplicationConfig); - return this; - } - - @Override - public Builder setStorageType(StorageType storageType) { - super.setStorageType(storageType); - return this; - } - - @Override - public Builder setVersioning(Boolean versioning) { - super.setVersioning(versioning); - return this; - } - - @Override - public Builder setCreationTime(long creationTime) { - super.setCreationTime(creationTime); - return this; - } - @Override public OzoneBucketStub build() { return new OzoneBucketStub(this); @@ -150,31 +117,16 @@ public OzoneOutputStream createKey(String key, long size, ReplicationFactor factor, Map<String, String> metadata) throws IOException { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream((int) size) { - @Override - public void close() throws IOException { - keyContents.put(key, toByteArray()); - keyDetails.put(key, new OzoneKeyDetails( - getVolumeName(), - getName(), - key, - size, - System.currentTimeMillis(), - System.currentTimeMillis(), - new ArrayList<>(), replicationConfig, metadata, null, - () -> readKey(key), true - )); - super.close(); - } - }; - return new OzoneOutputStream(byteArrayOutputStream, null); + ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(type, factor); + return createKey(key, size, replication, metadata); } @Override public OzoneOutputStream createKey(String key, long size, ReplicationConfig rConfig, Map<String, String> metadata) throws IOException { + assertDoesNotExist(key + "/"); + final ReplicationConfig repConfig; if (rConfig == null) { repConfig = getReplicationConfig(); @@ -209,6 +161,8 @@ public OzoneDataStreamOutput createStreamKey(String key, long size, ReplicationConfig rConfig, Map<String, String> keyMetadata) throws IOException { + assertDoesNotExist(key + "/"); + ByteBufferStreamOutput byteBufferStreamOutput = new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) { @@ -396,6 +350,17 @@ public void deleteKey(String key) throws IOException { keyDetails.remove(key); } + @Override + public Map<String, ErrorInfo> deleteKeys(List<String> keyList, boolean quiet) throws IOException { + Map<String, ErrorInfo> keyErrorMap = new HashMap<>(); + for (String key : keyList) { + if (keyDetails.remove(key) == null) { + keyErrorMap.put(key, new ErrorInfo("KEY_NOT_FOUND", "Key does not exist")); + } + } + return keyErrorMap; + } + @Override public void renameKey(String fromKeyName, String toKeyName) throws IOException { @@ -629,6 +594,9 @@ public ReplicationConfig getReplicationConfig() { @Override public void createDirectory(String keyName) throws IOException { + assertDoesNotExist(StringUtils.stripEnd(keyName, "/")); + + LOG.info("createDirectory({})", keyName);
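+ // register the directory as an ordinary stub entry whose isFile flag is false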
keyDetails.put(keyName, new OzoneKeyDetails( getVolumeName(), getName(), @@ -640,6 +608,12 @@ public void createDirectory(String keyName) throws IOException { () -> readKey(keyName), false)); } + private void assertDoesNotExist(String keyName) throws OMException { + if (keyDetails.get(keyName) != null) { + throw new OMException("already exists", ResultCodes.FILE_ALREADY_EXISTS); + } + } + /** * ByteArrayOutputStream stub with metadata. */ diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java index 9fab5a181b5..4ce18b41f1c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java @@ -38,17 +38,17 @@ /** * Ozone volume with in-memory state for testing. */ -public class OzoneVolumeStub extends OzoneVolume { +public final class OzoneVolumeStub extends OzoneVolume { - private Map buckets = new HashMap<>(); + private final Map buckets = new HashMap<>(); - private ArrayList aclList = new ArrayList<>(); + private final ArrayList aclList = new ArrayList<>(); public static Builder newBuilder() { return new Builder(); } - public OzoneVolumeStub(Builder b) { + private OzoneVolumeStub(Builder b) { super(b); } @@ -124,6 +124,7 @@ public void createBucket(String bucketName, BucketArgs bucketArgs) { .setDefaultReplicationConfig(new DefaultReplicationConfig( RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE))) + .setBucketLayout(bucketArgs.getBucketLayout()) .setStorageType(bucketArgs.getStorageType()) .setVersioning(bucketArgs.getVersioning()) .setCreationTime(Time.now()) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index b8c3a1c805b..2074eed6988 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -23,6 +23,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.stream.Stream; import java.io.OutputStream; import java.security.MessageDigest; import javax.ws.rs.core.HttpHeaders; @@ -30,27 +31,30 @@ import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; 
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.MockedStatic; import static java.nio.charset.StandardCharsets.UTF_8; @@ -60,6 +64,7 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -67,8 +72,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.spy; @@ -79,110 +82,104 @@ /** * Test put object. */ -public class TestObjectPut { - public static final String CONTENT = "0123456789"; - private String bucketName = "b1"; - private String keyName = "key=value/1"; - private String destBucket = "b2"; - private String destkey = "key=value/2"; - private String nonexist = "nonexist"; +class TestObjectPut { + private static final String CONTENT = "0123456789"; + private static final String FSO_BUCKET_NAME = "fso-bucket"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + private static final String DEST_BUCKET_NAME = "b2"; + private static final String DEST_KEY = "key=value/2"; + private static final String NO_SUCH_BUCKET = "nonexist"; + private OzoneClient clientStub; private ObjectEndpoint objectEndpoint; + private HttpHeaders headers; + private OzoneBucket bucket; + private OzoneBucket fsoBucket; + + static Stream argumentsForPutObject() { + ReplicationConfig ratis3 = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE); + ECReplicationConfig ec = new ECReplicationConfig("rs-3-2-1024K"); + return Stream.of( + Arguments.of(0, ratis3), + Arguments.of(10, ratis3), + Arguments.of(0, ec), + Arguments.of(10, ec) + ); + } @BeforeEach - public void setup() throws IOException { + void setup() throws IOException { + OzoneConfiguration config = new OzoneConfiguration(); + //Create client stub and object store stub. 
clientStub = new OzoneClientStub(); // Create bucket - clientStub.getObjectStore().createS3Bucket(bucketName); - clientStub.getObjectStore().createS3Bucket(destBucket); + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + bucket = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME); + clientStub.getObjectStore().createS3Bucket(DEST_BUCKET_NAME); // Create PutObject and setClient to OzoneClientStub objectEndpoint = spy(new ObjectEndpoint()); objectEndpoint.setClient(clientStub); - objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + objectEndpoint.setOzoneConfiguration(config); + + headers = mock(HttpHeaders.class); + objectEndpoint.setHeaders(headers); + + String volumeName = config.get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, + OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT); + OzoneVolume volume = clientStub.getObjectStore().getVolume(volumeName); + BucketArgs fsoBucketArgs = BucketArgs.newBuilder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + volume.createBucket(FSO_BUCKET_NAME, fsoBucketArgs); + fsoBucket = volume.getBucket(FSO_BUCKET_NAME); } - @Test - public void testPutObject() throws IOException, OS3Exception { + @ParameterizedTest + @MethodSource("argumentsForPutObject") + void testPutObject(int length, ReplicationConfig replication) throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); + final String content = RandomStringUtils.randomAlphanumeric(length); + ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); //THEN - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); - assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); - } - @Test - public void testPutObjectWithECReplicationConfig() - throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - ECReplicationConfig ecReplicationConfig = - new ECReplicationConfig("rs-3-2-1024K"); - clientStub.getObjectStore().getS3Bucket(bucketName) - .setReplicationConfig(ecReplicationConfig); - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - - assertEquals(ecReplicationConfig, - clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName) - .getReplicationConfig()); - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + String keyContent; + try (InputStream input = bucket.readKey(KEY_NAME)) { + keyContent = IOUtils.toString(input, UTF_8); + } + 
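+ // the uploaded bytes must round-trip unchanged for every replication config under test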
assertEquals(content, keyContent); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + OzoneKeyDetails keyDetails = bucket.getKey(KEY_NAME); + assertEquals(replication, keyDetails.getReplicationConfig()); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); } @Test - public void testPutObjectContentLength() throws IOException, OS3Exception { + void testPutObjectContentLength() throws IOException, OS3Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. - HttpHeaders headers = Mockito.mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); long dataSize = CONTENT.length(); - objectEndpoint.put(bucketName, keyName, dataSize, 0, null, body); - assertEquals(dataSize, getKeyDataSize(keyName)); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + assertEquals(dataSize, getKeyDataSize()); } @Test - public void testPutObjectContentLengthForStreaming() + void testPutObjectContentLengthForStreaming() throws IOException, OS3Exception { - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -193,22 +190,19 @@ public void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 0, null, + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); - assertEquals(15, getKeyDataSize(keyName)); + assertEquals(15, getKeyDataSize()); } - private long getKeyDataSize(String key) throws IOException { - return clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(key).getDataSize(); + private long getKeyDataSize() throws IOException { + return clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME).getDataSize(); } @Test - public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { + void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -220,21 +214,21 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { .thenReturn("15"); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 1, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, 
response.getStatus()); assertEquals("1234567890abcde", keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); } @Test @@ -246,12 +240,10 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception .thenThrow(IOException.class); when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); try { - objectEndpoint.put(bucketName, keyName, CONTENT + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT .length(), 1, null, body); fail("Should throw IOException"); } catch (IOException ignored) { @@ -263,13 +255,10 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception } @Test - public void testCopyObject() throws IOException, OS3Exception { + void testCopyObject() throws IOException, OS3Exception { // Put object in to source bucket - HttpHeaders headers = Mockito.mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; // Add some custom metadata MultivaluedMap metadataHeaders = new MultivaluedHashMap<>(); @@ -279,20 +268,20 @@ public void testCopyObject() throws IOException, OS3Exception { // Add COPY metadata directive (default) when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); - Response response = objectEndpoint.put(bucketName, keyName, + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(bucketName) - .readKey(keyName); + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); assertEquals("custom-value-1", keyDetails.getMetadata().get("custom-key-1")); assertEquals("custom-value-2", keyDetails.getMetadata().get("custom-key-2")); @@ -303,25 +292,25 @@ public void testCopyObject() throws IOException, OS3Exception { // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body); // Check destination key and response - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket) - .readKey(destkey); + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() - .getS3Bucket(bucketName).getKey(keyName); + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); OzoneKeyDetails 
destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(destBucket).getKey(destkey); + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); // Source key eTag should remain unchanged and the dest key should have // the same Etag since the key content is the same assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); @@ -335,17 +324,17 @@ public void testCopyObject() throws IOException, OS3Exception { metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1"); metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); - response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, - null, body); + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket) - .readKey(destkey); + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); sourceKeyDetails = clientStub.getObjectStore() - .getS3Bucket(bucketName).getKey(keyName); + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(destBucket).getKey(destkey); + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); @@ -363,46 +352,40 @@ public void testCopyObject() throws IOException, OS3Exception { // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - destBucket, destkey, CONTENT.length(), 1, null, body), - "test copy object failed"); - assertEquals(400, e.getHttpCode()); - assertEquals("InvalidArgument", e.getCode()); - assertTrue(e.getErrorMessage().contains("The metadata directive specified is invalid")); + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + "test copy object failed"); + assertThat(e.getHttpCode()).isEqualTo(400); + assertThat(e.getCode()).isEqualTo("InvalidArgument"); + assertThat(e.getErrorMessage()).contains("The metadata directive specified is invalid"); when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); - // source and dest same - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body), - "test copy object failed"); - assertTrue(e.getErrorMessage().contains("This copy request is illegal")); - // source bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(destBucket, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); - assertTrue(e.getCode().contains("NoSuchBucket")); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); - e = 
assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertTrue(e.getCode().contains("NoSuchBucket")); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertTrue(e.getCode().contains("NoSuchBucket")); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(nonexist)); + BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", keyName, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertTrue(e.getCode().contains("NoSuchBucket")); } @@ -410,26 +393,23 @@ public void testCopyObject() throws IOException, OS3Exception { @Test public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception { // Put object in to source bucket - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - Response response = objectEndpoint.put(bucketName, keyName, + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(bucketName) - .readKey(keyName); + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); MessageDigest messageDigest = mock(MessageDigest.class); try (MockedStatic mocked = mockStatic(IOUtils.class)) { @@ -440,10 +420,10 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); try { - objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, + objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body); fail("Should throw IOException"); } catch (IOException ignored) { @@ -455,113 +435,69 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException } @Test - public void testInvalidStorageType() throws IOException { - HttpHeaders headers = Mockito.mock(HttpHeaders.class); + void testInvalidStorageType() { ByteArrayInputStream 
body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); } @Test - public void testEmptyStorageType() throws IOException, OS3Exception { - HttpHeaders headers = Mockito.mock(HttpHeaders.class); + void testEmptyStorageType() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - objectEndpoint.put(bucketName, keyName, CONTENT + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT .length(), 1, null, body); OzoneKeyDetails key = - clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(keyName); - + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME); //default type is set - assertEquals(ReplicationType.RATIS, key.getReplicationType()); + assertEquals( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + key.getReplicationConfig()); } @Test - public void testDirectoryCreation() throws IOException, + void testDirectoryCreation() throws IOException, OS3Exception { // GIVEN - final String path = "dir"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; - final InputStream body = null; - final HttpHeaders headers = Mockito.mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = Mockito.mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = Mockito.mock(ObjectStore.class); - final OzoneVolume volume = Mockito.mock(OzoneVolume.class); - final OzoneBucket bucket = Mockito.mock(OzoneBucket.class); - final ClientProtocol protocol = Mockito.mock(ClientProtocol.class); + final String path = "dir/"; // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - final Response response = objEndpoint.put(bucketName, path, length, - partNumber, uploadId, body); + try (Response response = objectEndpoint.put(fsoBucket.getName(), path, + 0L, 0, "", null)) { + assertEquals(HttpStatus.SC_OK, response.getStatus()); + } // THEN - assertEquals(HttpStatus.SC_OK, response.getStatus()); - Mockito.verify(protocol).createDirectory(any(), eq(bucketName), eq(path)); + OzoneKeyDetails key = fsoBucket.getKey(path); + assertThat(key.isFile()).as("directory").isFalse(); } @Test - public void testDirectoryCreationOverFile() throws IOException { + void testDirectoryCreationOverFile() throws IOException, OS3Exception { // GIVEN final String path = "key"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; final ByteArrayInputStream body = - new ByteArrayInputStream("content".getBytes(UTF_8)); - final HttpHeaders headers = 
Mockito.mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = Mockito.mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = Mockito.mock(ObjectStore.class); - final OzoneVolume volume = Mockito.mock(OzoneVolume.class); - final OzoneBucket bucket = Mockito.mock(OzoneBucket.class); - final ClientProtocol protocol = Mockito.mock(ClientProtocol.class); + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - doThrow(new OMException(OMException.ResultCodes.FILE_ALREADY_EXISTS)) - .when(protocol) - .createDirectory(any(), any(), any()); + final OS3Exception exception = assertThrows(OS3Exception.class, + () -> objectEndpoint + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .close()); // THEN - final OS3Exception exception = assertThrows(OS3Exception.class, - () -> objEndpoint - .put(bucketName, path, length, partNumber, uploadId, body)); - assertEquals("Conflict", exception.getCode()); - assertEquals(409, exception.getHttpCode()); - Mockito.verify(protocol, times(1)).createDirectory(any(), any(), any()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index e9a93e3c521..65f7f6c74ed 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -38,6 +39,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -165,7 +167,9 @@ public void testListKey() throws IOException { public void testDeleteKeys() throws IOException, OS3Exception { Mockito.when(objectStore.getVolume(anyString())).thenReturn(volume); Mockito.when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); - doThrow(exception).when(bucket).deleteKey(any()); + Map deleteErrors = new HashMap<>(); + deleteErrors.put("deleteKeyName", new ErrorInfo("ACCESS_DENIED", "ACL check failed")); + when(bucket.deleteKeys(any(), anyBoolean())).thenReturn(deleteErrors); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); MultiDeleteRequest request = new MultiDeleteRequest(); @@ -178,7 +182,7 @@ public void testDeleteKeys() throws IOException, OS3Exception { 
bucketEndpoint.multiDelete("BucketName", "keyName", request); assertEquals(1, response.getErrors().size()); assertTrue( - response.getErrors().get(0).getCode().equals("PermissionDenied")); + response.getErrors().get(0).getCode().equals("ACCESS_DENIED")); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java index 007fa9099ee..78efa464c67 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -111,6 +112,7 @@ void testIfSecretAlreadyExists() throws IOException { } @Test + @Unhealthy("HDDS-11041") void testSecretGenerateWithUsername() throws IOException { hasNoSecretYet(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java index a319496419d..6461cfe80bc 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -97,6 +98,7 @@ void testSecretRevoke() throws IOException { } @Test + @Unhealthy("HDDS-11041") void testSecretRevokeWithUsername() throws IOException { endpoint.revoke(OTHER_USER_NAME); verify(objectStore, times(1))