From 0b1ea27a514e42bc27960f269a70f8a7bb40814b Mon Sep 17 00:00:00 2001 From: saketa Date: Wed, 26 Jul 2023 14:31:14 -0700 Subject: [PATCH 01/15] HDDS-7035. Rebased. --- .../ozone/s3/VirtualHostStyleFilter.java | 6 ++ .../s3/signature/StringToSignProducer.java | 12 ++++ .../apache/hadoop/ozone/s3/util/S3Consts.java | 3 +- .../ozone/s3/TestVirtualHostStyleFilter.java | 16 +++-- .../signature/TestStringToSignProducer.java | 58 +++++++++++++++++++ 5 files changed, 90 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index a257155e764..38708e5d8cd 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -39,6 +39,8 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PARAM; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_VIRTUAL; /** * Filter used to convert virtual host style pattern to path style pattern. @@ -111,6 +113,10 @@ public void filter(ContainerRequestContext requestContext) throws UriBuilder requestAddrBuilder = UriBuilder.fromUri(baseURI).path(newPath); queryParams.forEach((k, v) -> requestAddrBuilder.queryParam(k, v.toArray())); + // Enhance request URI to indicate virtual-host addressing style + // for later processing. 
+ requestAddrBuilder.queryParam( + ENDPOINT_STYLE_PARAM, ENDPOINT_STYLE_VIRTUAL); URI requestAddr = requestAddrBuilder.build(); requestContext.setRequestUri(baseURI, requestAddr); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java index b3fec69ba1d..f29841a3159 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java @@ -45,6 +45,9 @@ import com.google.common.annotations.VisibleForTesting; import static java.time.temporal.ChronoUnit.SECONDS; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PARAM; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PATH; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_VIRTUAL; import org.apache.kerby.util.Hex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -170,6 +173,15 @@ public static String buildCanonicalRequest( for (String p : parts) { encParts.add(urlEncode(p)); } + if (queryParams.getOrDefault(ENDPOINT_STYLE_PARAM, ENDPOINT_STYLE_PATH) + .equals(ENDPOINT_STYLE_VIRTUAL)) { + // Remove bucket name from URI if virtual-host style addressing is used. 
+ encParts.remove(1); + if (encParts.size() == 1) { + encParts.add(1, ""); + } + queryParams.remove(ENDPOINT_STYLE_PARAM); + } String canonicalUri = join("/", encParts); String canonicalQueryStr = getQueryParamString(queryParams); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index 05e9503225c..06c2c7dd4d7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -64,7 +64,8 @@ private S3Consts() { public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; + public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; - + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java index 19d9380de91..2f81c1c6a14 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.ozone.test.GenericTestUtils; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.glassfish.jersey.internal.PropertiesDelegate; import org.glassfish.jersey.server.ContainerRequest; import org.junit.Assert; @@ -106,7 +107,8 @@ public void testVirtualHostStyle() throws Exception { ".localhost:9878", "/myfile", null, true); virtualHostStyleFilter.filter(containerRequest); URI expected = new URI("http://" + s3HttpAddr + - "/mybucket/myfile"); + "/mybucket/myfile?" 
+ S3Consts.ENDPOINT_STYLE_PARAM + "=" + + S3Consts.ENDPOINT_STYLE_VIRTUAL); Assert.assertEquals(expected, containerRequest.getRequestUri()); } @@ -136,7 +138,9 @@ public void testVirtualHostStyleWithCreateBucketRequest() throws Exception { ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, null, true); virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/mybucket"); + URI expected = new URI("http://" + s3HttpAddr + "/mybucket?" + + S3Consts.ENDPOINT_STYLE_PARAM + "=" + + S3Consts.ENDPOINT_STYLE_VIRTUAL); Assert.assertEquals(expected, containerRequest.getRequestUri()); } @@ -151,7 +155,9 @@ public void testVirtualHostStyleWithQueryParams() throws Exception { ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, "?prefix=bh", true); virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh"); + URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh&" + + S3Consts.ENDPOINT_STYLE_PARAM + "=" + + S3Consts.ENDPOINT_STYLE_VIRTUAL); assertTrue(expected.toString().contains(containerRequest.getRequestUri() .toString())); @@ -159,7 +165,9 @@ public void testVirtualHostStyleWithQueryParams() throws Exception { ".localhost:9878", null, "?prefix=bh&type=dir", true); virtualHostStyleFilter.filter(containerRequest); expected = new URI("http://" + s3HttpAddr + - "/mybucket?prefix=bh&type=dir"); + "/mybucket?prefix=bh&type=dir&" + + S3Consts.ENDPOINT_STYLE_PARAM + "=" + + S3Consts.ENDPOINT_STYLE_VIRTUAL); assertTrue(expected.toString().contains(containerRequest.getRequestUri() .toString())); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java index 9d521721791..c454d9c5125 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java @@ -44,6 +44,8 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PARAM; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_VIRTUAL; import static org.junit.jupiter.params.provider.Arguments.arguments; /** @@ -269,4 +271,60 @@ public void testValidateCanonicalHeaders( Assert.assertEquals(expectedResult, actualResult); } + + @Test + public void testVirtualStyleAddressURI() throws Exception { + String canonicalRequest = "GET\n" + + "/\n" + + "\n" + + "host:bucket1.s3g.internal:9878\nx-amz-content-sha256:Content-SHA\n" + + "x-amz-date:" + DATETIME + "\n" + + "\n" + + "host;x-amz-content-sha256;x-amz-date\n" + + "Content-SHA"; + + String authHeader = + "AWS4-HMAC-SHA256 Credential=AKIAJWFJK62WUTKNFJJA/20181009/us-east-1" + + "/s3/aws4_request, " + + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " + + "Signature" + + + "=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2"; + + MultivaluedMap headers = new MultivaluedHashMap<>(); + headers.putSingle("Authorization", authHeader); + headers.putSingle("Content-Length", "123"); + headers.putSingle("Host", "bucket1.s3g.internal:9878"); + headers.putSingle("X-AMZ-Content-Sha256", "Content-SHA"); + headers.putSingle("X-AMZ-Date", DATETIME); + + final SignatureInfo signatureInfo = + new AuthorizationV4HeaderParser(authHeader, DATETIME) { + @Override + public void validateDateRange(Credential credentialObj) { + //NOOP + } + }.parseSignature(); + + ContainerRequestContext context = setupContext( + new URI("http://bucket1.s3g.internal:9878?" 
+ + ENDPOINT_STYLE_PARAM + "=" + ENDPOINT_STYLE_VIRTUAL), + "GET", + headers, + new MultivaluedHashMap<>()); + + final String signatureBase = + StringToSignProducer.createSignatureBase(signatureInfo, context); + + MessageDigest md = MessageDigest.getInstance("SHA-256"); + md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8)); + + Assert.assertEquals( + "String to sign is invalid", + "AWS4-HMAC-SHA256\n" + + DATETIME + "\n" + + "20181009/us-east-1/s3/aws4_request\n" + + Hex.encode(md.digest()).toLowerCase(), + signatureBase); + } } From be5016b43a717e3d2ac71cc307e186850b6a993c Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 29 Aug 2023 14:58:49 -0700 Subject: [PATCH 02/15] HDDS-7035. Removed custom variables. Moved String to Sign generation to AuthFilter. --- .../ozonesecure-ha/s3g-virtual-host.yaml | 47 +++++ .../ozonesecure-ha/test-s3g-virtual-host.sh | 34 ++++ .../src/main/compose/ozonesecure-ha/test.sh | 6 +- .../main/smoketest/s3/awss3virtualhost.robot | 56 ++++++ .../src/main/smoketest/s3/commonawslib.robot | 4 + .../hadoop/ozone/s3/AuthorizationFilter.java | 103 +++++++++++ .../hadoop/ozone/s3/ClientIpFilter.java | 3 +- .../hadoop/ozone/s3/HeaderPreprocessor.java | 6 +- .../hadoop/ozone/s3/OzoneClientProducer.java | 64 ------- .../ozone/s3/VirtualHostStyleFilter.java | 15 +- .../ozone/s3/endpoint/EndpointBase.java | 8 + .../s3/signature/AWSSignatureProcessor.java | 2 + .../ozone/s3/signature/SignatureInfo.java | 73 ++++++++ .../s3/signature/StringToSignProducer.java | 15 +- .../apache/hadoop/ozone/s3/util/S3Consts.java | 4 +- .../ozone/s3/TestAuthorizationFilter.java | 165 ++++++++++++++++++ .../ozone/s3/TestOzoneClientProducer.java | 151 +--------------- .../ozone/s3/TestVirtualHostStyleFilter.java | 31 ++-- .../TestAuthorizationV4QueryParser.java | 4 +- .../signature/TestStringToSignProducer.java | 62 +------ 20 files changed, 533 insertions(+), 320 deletions(-) create mode 100644 
hadoop-ozone/dist/src/main/compose/ozonesecure-ha/s3g-virtual-host.yaml create mode 100755 hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3g-virtual-host.sh create mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/awss3virtualhost.robot create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/s3g-virtual-host.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/s3g-virtual-host.yaml new file mode 100644 index 00000000000..708231ad20e --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/s3g-virtual-host.yaml @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3.8" + +x-s3g-virtual-host-config: + &s3g-virtual-host-config + environment: + - OZONE-SITE.XML_ozone.s3g.domain.name=s3g.internal +services: + datanode1: + <<: *s3g-virtual-host-config + datanode2: + <<: *s3g-virtual-host-config + datanode3: + <<: *s3g-virtual-host-config + om1: + <<: *s3g-virtual-host-config + om2: + <<: *s3g-virtual-host-config + om3: + <<: *s3g-virtual-host-config + scm1.org: + <<: *s3g-virtual-host-config + scm2.org: + <<: *s3g-virtual-host-config + scm3.org: + <<: *s3g-virtual-host-config + s3g: + <<: *s3g-virtual-host-config + extra_hosts: + - "bucket1.s3g.internal: 172.25.0.114" + recon: + <<: *s3g-virtual-host-config diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3g-virtual-host.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3g-virtual-host.sh new file mode 100755 index 00000000000..2bc3125801f --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-s3g-virtual-host.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#suite:HA-secure + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=true +export OM_SERVICE_ID="omservice" +export SCM=scm1.org +export COMPOSE_FILE=docker-compose.yaml:s3g-virtual-host.yaml + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env + +## Run virtual host test cases +execute_robot_test s3g -N s3-virtual-host s3/awss3virtualhost.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh index 70d45b33c65..fc57e7a782e 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh @@ -41,11 +41,13 @@ execute_robot_test s3g -v SCHEME:o3fs -v BUCKET_TYPE:link -N ozonefs-o3fs-link o execute_robot_test s3g basic/links.robot -exclude="" +## Exclude virtual-host.robot +exclude="--exclude virtual-host" for bucket in encrypted link; do execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once - exclude="--exclude no-bucket-type" + ## Exclude virtual-host.robot + exclude="--exclude virtual-host --exclude no-bucket-type" done execute_robot_test s3g admincli diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3virtualhost.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3virtualhost.robot new file mode 100644 index 00000000000..8f77e2c3e87 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3virtualhost.robot @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation S3 gateway test with aws cli using virtual host style address +Library OperatingSystem +Library String +Resource ../commonlib.robot +Resource ./commonawslib.robot +Test Timeout 5 minutes +Suite Setup Setup s3 tests +Default Tags virtual-host + +*** Variables *** +${ENDPOINT_URL} http://s3g.internal:9878 +${BUCKET} bucket1 +${OZONE_S3_ADDRESS_STYLE} virtual + +*** Test Cases *** + +File upload and directory list with virtual style addressing + Create bucket with name ${BUCKET} + Execute date > /tmp/testfile + ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET} --debug + Should contain ${result} url=http://bucket1.s3g.internal:9878/ + Should contain ${result} upload + ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file --debug + Should contain ${result} url=http://bucket1.s3g.internal:9878/dir1/dir2/file + Should contain ${result} upload + ${result} = Execute AWSS3Cli ls s3://${BUCKET} --debug + Should contain ${result} url=http://bucket1.s3g.internal:9878/ + Should contain ${result} testfile + Should contain ${result} dir1 + Should not contain ${result} dir2 + ${result} = Execute AWSS3Cli ls s3://${BUCKET}/dir1/ --debug + Should contain ${result} url=http://bucket1.s3g.internal:9878/ + Should contain ${result} prefix=dir1 + Should not contain ${result} testfile + Should contain ${result} dir2/ + ${result} = Execute AWSS3Cli ls s3://${BUCKET}/dir1/dir2/file + Should not contain ${result} testfile + Should not contain ${result} dir1 + Should not contain ${result} dir2 + Should contain ${result} 
file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 0a6a42f005f..f0f617eb1b6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -26,6 +26,7 @@ ${BUCKET} generated ${KEY_NAME} key1 ${OZONE_S3_TESTS_SET_UP} ${FALSE} ${OZONE_AWS_ACCESS_KEY_ID} ${EMPTY} +${OZONE_S3_ADDRESS_STYLE} path *** Keywords *** Execute AWSS3APICli @@ -85,6 +86,9 @@ Setup secure v4 headers Execute aws configure set aws_access_key_id ${accessKey} Execute aws configure set aws_secret_access_key ${secret} Execute aws configure set region us-west-1 + Return From Keyword If '${OZONE_S3_ADDRESS_STYLE}' != 'virtual' + Execute aws configure set default.s3.addressing_style virtual + Setup dummy credentials for S3 Execute aws configure set default.s3.signature_version s3v4 diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java new file mode 100644 index 00000000000..eb41aa663c8 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -0,0 +1,103 @@ +package org.apache.hadoop.ozone.s3; + +import javax.annotation.Priority; +import javax.inject.Inject; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.container.PreMatching; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.Provider; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version; +import 
org.apache.hadoop.ozone.s3.signature.SignatureProcessor; +import org.apache.hadoop.ozone.s3.signature.StringToSignProducer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; + +/** + * Filter used to construct string to sign from unfiltered request. + * It should be executed before all other filters as the original + * could be enriched later. + */ + +@Provider +@PreMatching +@Priority(AuthorizationFilter.PRIORITY) +public class AuthorizationFilter implements ContainerRequestFilter { + public static final int PRIORITY = 50; + + private static final Logger LOG = LoggerFactory.getLogger( + AuthorizationFilter.class); + + @Inject + private SignatureProcessor signatureProcessor; + + @Inject + private SignatureInfo signatureInfo; + + @Override + public void filter(ContainerRequestContext context) throws + IOException { + + try { + signatureInfo.initialize(signatureProcessor.parseSignature()); + if (signatureInfo.getVersion() == Version.V4) { + signatureInfo.setStrToSign( + StringToSignProducer.createSignatureBase(signatureInfo, context)); + } else { + LOG.debug("Unsupported AWS signature version: {}", + signatureInfo.getVersion()); + throw S3_AUTHINFO_CREATION_ERROR; + } + + String awsAccessId = signatureInfo.getAwsAccessId(); + // ONLY validate aws access id when needed. + if (awsAccessId == null || awsAccessId.equals("")) { + LOG.debug("Malformed s3 header. awsAccessID: {}", awsAccessId); + throw ACCESS_DENIED; + } + } catch (OS3Exception ex) { + LOG.debug("Error during Client Creation: ", ex); + throw wrapOS3Exception(ex); + } catch (Exception e) { + // For any other critical errors during object creation throw Internal + // error. 
+ LOG.debug("Error during Client Creation: ", e); + throw wrapOS3Exception( + S3ErrorTable.newError(INTERNAL_ERROR, null, e)); + } + } + + @VisibleForTesting + public void setSignatureParser(SignatureProcessor awsSignatureProcessor) { + this.signatureProcessor = awsSignatureProcessor; + } + + @VisibleForTesting + public void setSignatureInfo(SignatureInfo signatureInfo) { + this.signatureInfo = signatureInfo; + } + + @VisibleForTesting + public SignatureInfo getSignatureInfo() { + return signatureInfo; + } + + private WebApplicationException wrapOS3Exception(OS3Exception os3Exception) { + return new WebApplicationException(os3Exception.getErrorMessage(), + os3Exception, + Response.status(os3Exception.getHttpCode()) + .entity(os3Exception.toXml()).build()); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/ClientIpFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/ClientIpFilter.java index 921b18d9b59..858ebbcd0b9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/ClientIpFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/ClientIpFilter.java @@ -38,7 +38,8 @@ @Priority(ClientIpFilter.PRIORITY) public class ClientIpFilter implements ContainerRequestFilter { - public static final int PRIORITY = 200; + public static final int PRIORITY = HeaderPreprocessor.PRIORITY + + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER; public static final String CLIENT_IP_HEADER = "client_ip"; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java index 344fd909716..c9deb226496 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java @@ -34,10 +34,12 @@ */ @Provider @PreMatching 
-@Priority(VirtualHostStyleFilter.PRIORITY - + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER) +@Priority(HeaderPreprocessor.PRIORITY) public class HeaderPreprocessor implements ContainerRequestFilter { + public static final int PRIORITY = VirtualHostStyleFilter.PRIORITY + + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER; + public static final String MULTIPART_UPLOAD_MARKER = "ozone/mpu"; public static final String CONTENT_TYPE = "Content-Type"; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java index bfab4559a35..50e1f80e9c2 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java @@ -24,26 +24,13 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.core.Context; -import javax.ws.rs.core.Response; import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.om.protocol.S3Auth; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.signature.SignatureInfo; -import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version; -import org.apache.hadoop.ozone.s3.signature.SignatureProcessor; -import org.apache.hadoop.ozone.s3.signature.StringToSignProducer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; - /** * This 
class creates the OzoneClient for the Rest endpoints. */ @@ -55,9 +42,6 @@ public class OzoneClientProducer { private OzoneClient client; - @Inject - private SignatureProcessor signatureProcessor; - @Inject private OzoneConfiguration ozoneConfiguration; @@ -76,42 +60,6 @@ public synchronized OzoneClient createClient() throws WebApplicationException, public void destroy() throws IOException { client.getObjectStore().getClientProxy().clearThreadLocalS3Auth(); } - @Produces - public S3Auth getSignature() { - try { - SignatureInfo signatureInfo = signatureProcessor.parseSignature(); - String stringToSign = ""; - if (signatureInfo.getVersion() == Version.V4) { - stringToSign = - StringToSignProducer.createSignatureBase(signatureInfo, context); - } else { - LOG.debug("Unsupported AWS signature version: {}", - signatureInfo.getVersion()); - throw S3_AUTHINFO_CREATION_ERROR; - } - - String awsAccessId = signatureInfo.getAwsAccessId(); - // ONLY validate aws access id when needed. - if (awsAccessId == null || awsAccessId.equals("")) { - LOG.debug("Malformed s3 header. awsAccessID: {}", awsAccessId); - throw ACCESS_DENIED; - } - - // Note: userPrincipal is initialized to be the same value as accessId, - // could be updated later in RpcClient#getS3Volume - return new S3Auth(stringToSign, - signatureInfo.getSignature(), - awsAccessId, awsAccessId); - } catch (OS3Exception ex) { - LOG.debug("Error during Client Creation: ", ex); - throw wrapOS3Exception(ex); - } catch (Exception e) { - // For any other critical errors during object creation throw Internal - // error. 
- LOG.debug("Error during Client Creation: ", e); - throw wrapOS3Exception(S3ErrorTable.newError(INTERNAL_ERROR, null, e)); - } - } private OzoneClient getClient(OzoneConfiguration config) throws IOException { @@ -133,16 +81,4 @@ private OzoneClient getClient(OzoneConfiguration config) public synchronized void setOzoneConfiguration(OzoneConfiguration config) { this.ozoneConfiguration = config; } - - @VisibleForTesting - public void setSignatureParser(SignatureProcessor awsSignatureProcessor) { - this.signatureProcessor = awsSignatureProcessor; - } - - private WebApplicationException wrapOS3Exception(OS3Exception os3Exception) { - return new WebApplicationException(os3Exception.getErrorMessage(), - os3Exception, - Response.status(os3Exception.getHttpCode()) - .entity(os3Exception.toXml()).build()); - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index 38708e5d8cd..81235fb9d56 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -39,8 +39,6 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PARAM; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_VIRTUAL; /** * Filter used to convert virtual host style pattern to path style pattern. 
@@ -51,7 +49,8 @@ @Priority(VirtualHostStyleFilter.PRIORITY) public class VirtualHostStyleFilter implements ContainerRequestFilter { - public static final int PRIORITY = 100; + public static final int PRIORITY = AuthorizationFilter.PRIORITY + + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER; private static final Logger LOG = LoggerFactory.getLogger( VirtualHostStyleFilter.class); @@ -105,18 +104,14 @@ public void filter(ContainerRequestContext requestContext) throws URI baseURI = requestContext.getUriInfo().getBaseUri(); String currentPath = requestContext.getUriInfo().getPath(); String newPath = bucketName; - if (currentPath != null) { - newPath += String.format("%s", currentPath); - } MultivaluedMap queryParams = requestContext.getUriInfo() .getQueryParameters(); UriBuilder requestAddrBuilder = UriBuilder.fromUri(baseURI).path(newPath); + if (currentPath != null) { + requestAddrBuilder.path(currentPath); + } queryParams.forEach((k, v) -> requestAddrBuilder.queryParam(k, v.toArray())); - // Enhance request URI to indicate virtual-host addressing style - // for later processing. 
- requestAddrBuilder.queryParam( - ENDPOINT_STYLE_PARAM, ENDPOINT_STYLE_VIRTUAL); URI requestAddr = requestAddrBuilder.build(); requestContext.setRequestUri(baseURI, requestAddr); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 66fb7df190a..083c530c5da 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -57,6 +57,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo; import org.apache.hadoop.ozone.s3.util.AuditUtils; import org.apache.hadoop.util.Time; import org.slf4j.Logger; @@ -75,6 +76,8 @@ public abstract class EndpointBase implements Auditor { @Inject private OzoneClient client; @Inject + SignatureInfo signatureInfo; + private S3Auth s3Auth; @Context private ContainerRequestContext context; @@ -114,6 +117,11 @@ protected OzoneBucket getBucket(OzoneVolume volume, String bucketName) */ @PostConstruct public void initialization() { + // Note: userPrincipal is initialized to be the same value as accessId, + // could be updated later in RpcClient#getS3Volume + s3Auth = new S3Auth(signatureInfo.getStringToSign(), + signatureInfo.getSignature(), + signatureInfo.getAwsAccessId(), signatureInfo.getAwsAccessId()); LOG.debug("S3 access id: {}", s3Auth.getAccessID()); getClient().getObjectStore() .getClientProxy() diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java index 1ae8682186b..43a1e6b7130 100644 --- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java @@ -95,6 +95,8 @@ public SignatureInfo parseSignature() throws OS3Exception { "", "", "", "", "", "", "", false ); } + signatureInfo.setUnfilteredURI( + context.getUriInfo().getRequestUri().getPath()); return signatureInfo; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java index 0da8895921b..90f1922ae97 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java @@ -17,11 +17,14 @@ */ package org.apache.hadoop.ozone.s3.signature; +import javax.enterprise.context.RequestScoped; + /** * Signature and related information. *

* Required to create stringToSign and token. */ +@RequestScoped public class SignatureInfo { private Version version; @@ -48,6 +51,13 @@ public class SignatureInfo { private boolean signPayload = true; + private String unfilteredURI; + + + private String stringToSign; + + public SignatureInfo(){} + @SuppressWarnings("checkstyle:ParameterNumber") public SignatureInfo( Version version, @@ -59,6 +69,51 @@ public SignatureInfo( String credentialScope, String algorithm, boolean signPayload + ) { + this(version, date, dateTime, awsAccessId, signature, signedHeaders, + credentialScope, algorithm, signPayload, null, null); + } + + public SignatureInfo( + Version version, + String date, + String dateTime, + String awsAccessId, + String signature, + String signedHeaders, + String credentialScope, + String algorithm, + boolean signPayload, + String uri, + String stringToSign + ) { + initialize(version, date, dateTime, awsAccessId, signature, signedHeaders, + credentialScope, algorithm, signPayload, uri, stringToSign); + } + + public void initialize( + SignatureInfo signatureInfo + ) { + initialize(signatureInfo.getVersion(), signatureInfo.getDate(), + signatureInfo.getDateTime(), signatureInfo.getAwsAccessId(), + signatureInfo.getSignature(), signatureInfo.getSignedHeaders(), + signatureInfo.getCredentialScope(), signatureInfo.getAlgorithm(), + signatureInfo.isSignPayload(), signatureInfo.getUnfilteredURI(), + signatureInfo.getStringToSign()); + } + + public void initialize( + Version version, + String date, + String dateTime, + String awsAccessId, + String signature, + String signedHeaders, + String credentialScope, + String algorithm, + boolean signPayload, + String uri, + String stringToSign ) { this.version = version; this.date = date; @@ -69,6 +124,8 @@ public SignatureInfo( this.credentialScope = credentialScope; this.algorithm = algorithm; this.signPayload = signPayload; + this.unfilteredURI = uri; + this.stringToSign = stringToSign; } public String getAwsAccessId() { 
@@ -107,6 +164,22 @@ public String getDateTime() { return dateTime; } + public String getUnfilteredURI() { + return unfilteredURI; + } + + public String getStringToSign() { + return stringToSign; + } + + public void setUnfilteredURI(String uri) { + this.unfilteredURI = uri; + } + + public void setStrToSign(String strToSign) { + this.stringToSign = strToSign; + } + /** * Signature version. */ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java index f29841a3159..a8a4f6072d2 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java @@ -45,9 +45,6 @@ import com.google.common.annotations.VisibleForTesting; import static java.time.temporal.ChronoUnit.SECONDS; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PARAM; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PATH; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_VIRTUAL; import org.apache.kerby.util.Hex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,7 +81,6 @@ public static String createSignatureBase( return createSignatureBase(signatureInfo, context.getUriInfo().getRequestUri().getScheme(), context.getMethod(), - context.getUriInfo().getRequestUri().getPath(), LowerCaseKeyStringMap.fromHeaderMap(context.getHeaders()), fromMultiValueToSingleValueMap( context.getUriInfo().getQueryParameters())); @@ -95,7 +91,6 @@ public static String createSignatureBase( SignatureInfo signatureInfo, String scheme, String method, - String uri, LowerCaseKeyStringMap headers, Map queryParams ) throws Exception { @@ -114,6 +109,7 @@ public static 
String createSignatureBase( String credentialScope = signatureInfo.getCredentialScope(); // If the absolute path is empty, use a forward slash (/) + String uri = signatureInfo.getUnfilteredURI(); uri = (uri.trim().length() > 0) ? uri : "/"; // Encode URI and preserve forward slashes strToSign.append(signatureInfo.getAlgorithm() + NEWLINE); @@ -173,15 +169,6 @@ public static String buildCanonicalRequest( for (String p : parts) { encParts.add(urlEncode(p)); } - if (queryParams.getOrDefault(ENDPOINT_STYLE_PARAM, ENDPOINT_STYLE_PATH) - .equals(ENDPOINT_STYLE_VIRTUAL)) { - // Remove bucket name from URI if virtual-host style addressing is used. - encParts.remove(1); - if (encParts.size() == 1) { - encParts.add(1, ""); - } - queryParams.remove(ENDPOINT_STYLE_PARAM); - } String canonicalUri = join("/", encParts); String canonicalQueryStr = getQueryParamString(queryParams); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index 06c2c7dd4d7..e151c7b82c5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -63,9 +63,9 @@ private S3Consts() { ".com/doc/2006-03-01/"; public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; - + public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; - + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java new file mode 100644 index 00000000000..8f1b8cfaf4c --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java @@ -0,0 +1,165 @@ +package org.apache.hadoop.ozone.s3; + +import javax.ws.rs.WebApplicationException; +import 
javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.UriInfo; +import java.net.URI; +import java.util.stream.Stream; + +import org.apache.hadoop.ozone.s3.signature.AWSSignatureProcessor; + +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_FORBIDDEN; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; +import static org.apache.hadoop.ozone.s3.signature.SignatureParser.AUTHORIZATION_HEADER; +import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_MD5; +import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_TYPE; +import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.HOST_HEADER; +import static org.apache.hadoop.ozone.s3.signature.StringToSignProducer.X_AMAZ_DATE; +import static org.apache.hadoop.ozone.s3.signature.StringToSignProducer.X_AMZ_CONTENT_SHA256; +import static org.junit.Assert.fail; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +import org.apache.hadoop.ozone.s3.signature.SignatureInfo; +import org.apache.hadoop.ozone.s3.signature.SignatureProcessor; +import org.junit.Assert; +import org.junit.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mockito.Mockito; + +/** + * This class test string to sign generation. 
+ */ +public class TestAuthorizationFilter { + + private AuthorizationFilter authorizationFilter = new AuthorizationFilter(); + + private MultivaluedMap headerMap; + private MultivaluedMap queryMap; + private MultivaluedMap pathParamsMap; + + private static StreamtestAuthFilterFailuresInput() { + return Stream.of( + arguments( + "AWS4-HMAC-SHA256 Credential=testuser1/20190221/us-west-1/s3" + + "/aws4_request, SignedHeaders=content-md5;host;" + + "x-amz-content-sha256;x-amz-date, " + + "Signature" + + "=56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf47" + + "65f46a14cd745ad", + "Zi68x2nPDDXv5qfDC+ZWTg==", + "s3g:9878", + "e2bd43f11c97cde3465e0e8d1aad77af7ec7aa2ed8e213cd0e24" + + "1e28375860c6", + "20190221T002037Z", + "", + "/" + ), + arguments( + "AWS4-HMAC-SHA256 " + + "Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request," + + " SignedHeaders=content-type;host;x-amz-date, " + + "Signature=" + + "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400" + + "e06b5924a6f2b5d7", + "", + "iam.amazonaws.com", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "20150830T123600Z", + "application/x-www-form-urlencoded; charset=utf-8", + "" + ), + arguments(null, null, null, null, null, null, null), + arguments("", null, null, null, null, null, null), + // AWS V2 signature + arguments( + "AWS AKIDEXAMPLE:St7bHPOdkmsX/GITGe98rOQiUCg=", + "", + "s3g:9878", + "", + "Wed, 22 Mar 2023 17:00:06 +0000", + "application/octet-stream", + "/" + ) + ); + } + + @ParameterizedTest + @MethodSource("testAuthFilterFailuresInput") + public void testAuthFilterFailures( + String authHeader, String contentMd5, + String host, String amzContentSha256, String date, String contentType, + String path + ) { + headerMap = new MultivaluedHashMap<>(); + queryMap = new MultivaluedHashMap<>(); + pathParamsMap = new MultivaluedHashMap<>(); + try { + System.err.println("Testing: " + authHeader); + headerMap.putSingle(AUTHORIZATION_HEADER, authHeader); + headerMap.putSingle(CONTENT_MD5, 
contentMd5); + headerMap.putSingle(HOST_HEADER, host); + headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256); + headerMap.putSingle(X_AMAZ_DATE, date); + headerMap.putSingle(CONTENT_TYPE, contentType); + + UriInfo uriInfo = Mockito.mock(UriInfo.class); + ContainerRequestContext context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); + Mockito.when(uriInfo.getRequestUri()).thenReturn( + new URI("http://" + host+path)); + + Mockito.when(context.getUriInfo()).thenReturn(uriInfo); + Mockito.when(context.getHeaders()).thenReturn(headerMap); + Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER)) + .thenReturn(authHeader); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(queryMap); + Mockito.when(context.getUriInfo().getPathParameters()) + .thenReturn(pathParamsMap); + + AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor(); + awsSignatureProcessor.setContext(context); + + SignatureInfo signatureInfo = new SignatureInfo(); + + authorizationFilter.setSignatureParser(awsSignatureProcessor); + authorizationFilter.setSignatureInfo(signatureInfo); + + authorizationFilter.filter(context); + if ("".equals(authHeader)) { + fail("Empty AuthHeader must fail"); + } + } catch (WebApplicationException ex) { + if (authHeader == null || authHeader.isEmpty() || + authHeader.startsWith("AWS ")) { + // Empty auth header and unsupported AWS signature + // should fail with Invalid Request. + Assert.assertEquals(HTTP_FORBIDDEN, ex.getResponse().getStatus()); + Assert.assertEquals(S3_AUTHINFO_CREATION_ERROR.getErrorMessage(), + ex.getMessage()); + } else { + // Other requests have stale timestamp and + // should fail with Malformed Authorization Header. 
+ Assert.assertEquals(HTTP_BAD_REQUEST, ex.getResponse().getStatus()); + Assert.assertEquals(MALFORMED_HEADER.getErrorMessage(), + ex.getMessage()); + + } + + } catch (Exception ex) { + fail("Unexpected exception: " + ex); + } + } + + //testStrToSign generation + +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java index 064d9836f6f..13efecbde40 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java @@ -30,118 +30,29 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.s3.signature.AWSSignatureProcessor; -import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; -import static java.net.HttpURLConnection.HTTP_FORBIDDEN; - -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; -import static org.apache.hadoop.ozone.s3.signature.SignatureParser.AUTHORIZATION_HEADER; -import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_MD5; -import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_TYPE; -import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.HOST_HEADER; -import static org.apache.hadoop.ozone.s3.signature.StringToSignProducer.X_AMAZ_DATE; -import static org.apache.hadoop.ozone.s3.signature.StringToSignProducer.X_AMZ_CONTENT_SHA256; import static org.junit.Assert.fail; import org.junit.Assert; import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; import org.mockito.Mockito; /** * Test class for @{@link 
OzoneClientProducer}. */ -@RunWith(Parameterized.class) public class TestOzoneClientProducer { private OzoneClientProducer producer; - private MultivaluedMap headerMap; - private MultivaluedMap queryMap; - private MultivaluedMap pathParamsMap; - private String authHeader; - private String contentMd5; - private String host; - private String amzContentSha256; - private String date; - private String contentType; - private ContainerRequestContext context; - private UriInfo uriInfo; - public TestOzoneClientProducer( - String authHeader, String contentMd5, - String host, String amzContentSha256, String date, String contentType - ) + public TestOzoneClientProducer() throws Exception { - this.authHeader = authHeader; - this.contentMd5 = contentMd5; - this.host = host; - this.amzContentSha256 = amzContentSha256; - this.date = date; - this.contentType = contentType; producer = new OzoneClientProducer(); - headerMap = new MultivaluedHashMap<>(); - queryMap = new MultivaluedHashMap<>(); - pathParamsMap = new MultivaluedHashMap<>(); - uriInfo = Mockito.mock(UriInfo.class); - context = Mockito.mock(ContainerRequestContext.class); OzoneConfiguration config = new OzoneConfiguration(); config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true); config.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, ""); - setupContext(); producer.setOzoneConfiguration(config); } - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - { - "AWS4-HMAC-SHA256 Credential=testuser1/20190221/us-west-1/s3" + - "/aws4_request, SignedHeaders=content-md5;host;" + - "x-amz-content-sha256;x-amz-date, " + - "Signature" + - "=56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf47" + - "65f46a14cd745ad", - "Zi68x2nPDDXv5qfDC+ZWTg==", - "s3g:9878", - "e2bd43f11c97cde3465e0e8d1aad77af7ec7aa2ed8e213cd0e24" + - "1e28375860c6", - "20190221T002037Z", - "" - }, - { - "AWS4-HMAC-SHA256 " + - "Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request," + - " 
SignedHeaders=content-type;host;x-amz-date, " + - "Signature=" + - "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400" + - "e06b5924a6f2b5d7", - "", - "iam.amazonaws.com", - "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "20150830T123600Z", - "application/x-www-form-urlencoded; charset=utf-8" - }, - { - null, null, null, null, null, null - }, - { - "", null, null, null, null, null - }, - // AWS V2 signature - { - "AWS AKIDEXAMPLE:St7bHPOdkmsX/GITGe98rOQiUCg=", - "", - "s3g:9878", - "", - "Wed, 22 Mar 2023 17:00:06 +0000", - "application/octet-stream" - } - }); - } - @Test public void testGetClientFailure() { try { @@ -152,40 +63,6 @@ public void testGetClientFailure() { } } - @Test - public void testGetSignature() { - try { - System.err.println("Testing: " + authHeader); - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "ozone1"); - configuration.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "ozone1addr:9399"); - producer.setOzoneConfiguration(configuration); - producer.getSignature(); - if ("".equals(authHeader)) { - fail("Empty AuthHeader must fail"); - } - } catch (WebApplicationException ex) { - if (authHeader == null || authHeader.isEmpty() || - authHeader.startsWith("AWS ")) { - // Empty auth header and unsupported AWS signature - // should fail with Invalid Request. - Assert.assertEquals(HTTP_FORBIDDEN, ex.getResponse().getStatus()); - Assert.assertEquals(S3_AUTHINFO_CREATION_ERROR.getErrorMessage(), - ex.getMessage()); - } else { - // Other requests have stale timestamp and - // should fail with Malformed Authorization Header. 
- Assert.assertEquals(HTTP_BAD_REQUEST, ex.getResponse().getStatus()); - Assert.assertEquals(MALFORMED_HEADER.getErrorMessage(), - ex.getMessage()); - - } - - } catch (Exception ex) { - fail("Unexpected exception: " + ex); - } - } - @Test public void testGetClientFailureWithMultipleServiceIds() { try { @@ -219,30 +96,4 @@ public void testGetClientFailureWithMultipleServiceIdsAndInternalServiceId() { } } - private void setupContext() throws Exception { - headerMap.putSingle(AUTHORIZATION_HEADER, authHeader); - headerMap.putSingle(CONTENT_MD5, contentMd5); - headerMap.putSingle(HOST_HEADER, host); - headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256); - headerMap.putSingle(X_AMAZ_DATE, date); - headerMap.putSingle(CONTENT_TYPE, contentType); - - Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); - Mockito.when(uriInfo.getRequestUri()).thenReturn(new URI("")); - - Mockito.when(context.getUriInfo()).thenReturn(uriInfo); - Mockito.when(context.getHeaders()).thenReturn(headerMap); - Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER)) - .thenReturn(authHeader); - Mockito.when(context.getUriInfo().getQueryParameters()) - .thenReturn(queryMap); - Mockito.when(context.getUriInfo().getPathParameters()) - .thenReturn(pathParamsMap); - - AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor(); - awsSignatureProcessor.setContext(context); - - producer.setSignatureParser(awsSignatureProcessor); - } - } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java index 2f81c1c6a14..d4f4e794a02 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java @@ -20,7 +20,6 @@ import org.apache.hadoop.fs.InvalidRequestException; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.ozone.test.GenericTestUtils; -import org.apache.hadoop.ozone.s3.util.S3Consts; import org.glassfish.jersey.internal.PropertiesDelegate; import org.glassfish.jersey.server.ContainerRequest; import org.junit.Assert; @@ -106,9 +105,7 @@ public void testVirtualHostStyle() throws Exception { ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", "/myfile", null, true); virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + - "/mybucket/myfile?" + S3Consts.ENDPOINT_STYLE_PARAM + "=" + - S3Consts.ENDPOINT_STYLE_VIRTUAL); + URI expected = new URI("http://" + s3HttpAddr + "/mybucket/myfile"); Assert.assertEquals(expected, containerRequest.getRequestUri()); } @@ -138,26 +135,34 @@ public void testVirtualHostStyleWithCreateBucketRequest() throws Exception { ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, null, true); virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/mybucket?" 
+ - S3Consts.ENDPOINT_STYLE_PARAM + "=" + - S3Consts.ENDPOINT_STYLE_VIRTUAL); + URI expected = new URI("http://" + s3HttpAddr + "/mybucket"); Assert.assertEquals(expected, containerRequest.getRequestUri()); } + @Test + public void testVirtualHostStyleWithCreateKeyRequest() throws Exception { + VirtualHostStyleFilter virtualHostStyleFilter = + new VirtualHostStyleFilter(); + virtualHostStyleFilter.setConfiguration(conf); + + ContainerRequest containerRequest = createContainerRequest("mybucket" + + ".localhost:9878", "/key1", null, true); + virtualHostStyleFilter.filter(containerRequest); + URI expected = new URI("http://" + s3HttpAddr + "/mybucket/key1"); + Assert.assertEquals(expected, containerRequest.getRequestUri()); + } + @Test public void testVirtualHostStyleWithQueryParams() throws Exception { VirtualHostStyleFilter virtualHostStyleFilter = new VirtualHostStyleFilter(); virtualHostStyleFilter.setConfiguration(conf); - + URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh"); ContainerRequest containerRequest = createContainerRequest("mybucket" + ".localhost:9878", null, "?prefix=bh", true); virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh&" + - S3Consts.ENDPOINT_STYLE_PARAM + "=" + - S3Consts.ENDPOINT_STYLE_VIRTUAL); assertTrue(expected.toString().contains(containerRequest.getRequestUri() .toString())); @@ -165,9 +170,7 @@ public void testVirtualHostStyleWithQueryParams() throws Exception { ".localhost:9878", null, "?prefix=bh&type=dir", true); virtualHostStyleFilter.filter(containerRequest); expected = new URI("http://" + s3HttpAddr + - "/mybucket?prefix=bh&type=dir&" + - S3Consts.ENDPOINT_STYLE_PARAM + "=" + - S3Consts.ENDPOINT_STYLE_VIRTUAL); + "/mybucket?prefix=bh&type=dir"); assertTrue(expected.toString().contains(containerRequest.getRequestUri() .toString())); diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestAuthorizationV4QueryParser.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestAuthorizationV4QueryParser.java index 7221dfbb02e..181e962e5ca 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestAuthorizationV4QueryParser.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestAuthorizationV4QueryParser.java @@ -284,14 +284,14 @@ protected void validateDateAndExpires() { }; final SignatureInfo signatureInfo = parser.parseSignature(); + signatureInfo.setUnfilteredURI("/test.txt"); LowerCaseKeyStringMap headers = new LowerCaseKeyStringMap(); headers.put("host", "localhost"); final String stringToSign = StringToSignProducer.createSignatureBase(signatureInfo, "https", "GET", - "/test.txt", headers, - queryParams); + headers, queryParams); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8)); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java index c454d9c5125..14aa6494766 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java @@ -44,8 +44,6 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.DATE_FORMATTER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_PARAM; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENDPOINT_STYLE_VIRTUAL; import static org.junit.jupiter.params.provider.Arguments.arguments; /** @@ -97,6 +95,7 @@ public void 
validateDateRange(Credential credentialObj) { //NOOP } }.parseSignature(); + signatureInfo.setUnfilteredURI("/buckets"); headers.fixContentType(); @@ -105,7 +104,6 @@ public void validateDateRange(Credential credentialObj) { signatureInfo, "http", "GET", - "/buckets", headers, queryParameters); @@ -205,6 +203,7 @@ public void testValidateRequestHeaders( SignatureInfo signatureInfo = new AuthorizationV4HeaderParser( headerMap.getFirst("Authorization"), headerMap.getFirst("X-Amz-Date")).parseSignature(); + signatureInfo.setUnfilteredURI("/"); try { StringToSignProducer.createSignatureBase(signatureInfo, context); } catch (OS3Exception e) { @@ -262,6 +261,7 @@ public void testValidateCanonicalHeaders( SignatureInfo signatureInfo = new AuthorizationV4HeaderParser( headerMap.getFirst("Authorization"), headerMap.getFirst("x-amz-date")).parseSignature(); + signatureInfo.setUnfilteredURI("/"); try { StringToSignProducer.createSignatureBase(signatureInfo, context); @@ -271,60 +271,4 @@ public void testValidateCanonicalHeaders( Assert.assertEquals(expectedResult, actualResult); } - - @Test - public void testVirtualStyleAddressURI() throws Exception { - String canonicalRequest = "GET\n" - + "/\n" - + "\n" - + "host:bucket1.s3g.internal:9878\nx-amz-content-sha256:Content-SHA\n" - + "x-amz-date:" + DATETIME + "\n" - + "\n" - + "host;x-amz-content-sha256;x-amz-date\n" - + "Content-SHA"; - - String authHeader = - "AWS4-HMAC-SHA256 Credential=AKIAJWFJK62WUTKNFJJA/20181009/us-east-1" - + "/s3/aws4_request, " - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - + "Signature" - + - "=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2"; - - MultivaluedMap headers = new MultivaluedHashMap<>(); - headers.putSingle("Authorization", authHeader); - headers.putSingle("Content-Length", "123"); - headers.putSingle("Host", "bucket1.s3g.internal:9878"); - headers.putSingle("X-AMZ-Content-Sha256", "Content-SHA"); - headers.putSingle("X-AMZ-Date", DATETIME); - - final 
SignatureInfo signatureInfo = - new AuthorizationV4HeaderParser(authHeader, DATETIME) { - @Override - public void validateDateRange(Credential credentialObj) { - //NOOP - } - }.parseSignature(); - - ContainerRequestContext context = setupContext( - new URI("http://bucket1.s3g.internal:9878?" + - ENDPOINT_STYLE_PARAM + "=" + ENDPOINT_STYLE_VIRTUAL), - "GET", - headers, - new MultivaluedHashMap<>()); - - final String signatureBase = - StringToSignProducer.createSignatureBase(signatureInfo, context); - - MessageDigest md = MessageDigest.getInstance("SHA-256"); - md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8)); - - Assert.assertEquals( - "String to sign is invalid", - "AWS4-HMAC-SHA256\n" - + DATETIME + "\n" - + "20181009/us-east-1/s3/aws4_request\n" - + Hex.encode(md.digest()).toLowerCase(), - signatureBase); - } } From 1a05023210b28ef5c1de3ce194c49bd8508bff47 Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 29 Aug 2023 16:09:00 -0700 Subject: [PATCH 03/15] HDDS-7035. Fixed checkstyle errors. 
--- .../ozone/s3/endpoint/EndpointBase.java | 2 +- .../ozone/s3/signature/SignatureInfo.java | 26 ++++--------------- .../ozone/s3/TestAuthorizationFilter.java | 9 +++---- .../ozone/s3/TestOzoneClientProducer.java | 9 ------- 4 files changed, 9 insertions(+), 37 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 083c530c5da..97027abd7c8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -76,7 +76,7 @@ public abstract class EndpointBase implements Auditor { @Inject private OzoneClient client; @Inject - SignatureInfo signatureInfo; + private SignatureInfo signatureInfo; private S3Auth s3Auth; @Context diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java index 90f1922ae97..d1db38a50aa 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java @@ -51,12 +51,12 @@ public class SignatureInfo { private boolean signPayload = true; - private String unfilteredURI; + private String unfilteredURI = null; - private String stringToSign; + private String stringToSign = null; - public SignatureInfo(){} + public SignatureInfo() { } @SuppressWarnings("checkstyle:ParameterNumber") public SignatureInfo( @@ -69,26 +69,9 @@ public SignatureInfo( String credentialScope, String algorithm, boolean signPayload - ) { - this(version, date, dateTime, awsAccessId, signature, signedHeaders, - credentialScope, algorithm, signPayload, null, null); - } - - public SignatureInfo( - Version version, - 
String date, - String dateTime, - String awsAccessId, - String signature, - String signedHeaders, - String credentialScope, - String algorithm, - boolean signPayload, - String uri, - String stringToSign ) { initialize(version, date, dateTime, awsAccessId, signature, signedHeaders, - credentialScope, algorithm, signPayload, uri, stringToSign); + credentialScope, algorithm, signPayload, null, null); } public void initialize( @@ -102,6 +85,7 @@ public void initialize( signatureInfo.getStringToSign()); } + @SuppressWarnings({"checkstyle:ParameterNumber", "checkstyle:HiddenField"}) public void initialize( Version version, String date, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java index 8f1b8cfaf4c..3e0b589ca99 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java @@ -25,14 +25,10 @@ import static org.junit.jupiter.params.provider.Arguments.arguments; import org.apache.hadoop.ozone.s3.signature.SignatureInfo; -import org.apache.hadoop.ozone.s3.signature.SignatureProcessor; import org.junit.Assert; -import org.junit.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; import org.mockito.Mockito; /** @@ -112,10 +108,11 @@ public void testAuthFilterFailures( headerMap.putSingle(CONTENT_TYPE, contentType); UriInfo uriInfo = Mockito.mock(UriInfo.class); - ContainerRequestContext context = Mockito.mock(ContainerRequestContext.class); + ContainerRequestContext context = Mockito.mock( + ContainerRequestContext.class); Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); 
Mockito.when(uriInfo.getRequestUri()).thenReturn( - new URI("http://" + host+path)); + new URI("http://" + host + path)); Mockito.when(context.getUriInfo()).thenReturn(uriInfo); Mockito.when(context.getHeaders()).thenReturn(headerMap); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java index 13efecbde40..ed7a8be944a 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java @@ -17,15 +17,7 @@ */ package org.apache.hadoop.ozone.s3; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.UriInfo; import java.io.IOException; -import java.net.URI; -import java.util.Arrays; -import java.util.Collection; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -35,7 +27,6 @@ import org.junit.Assert; import org.junit.Test; -import org.mockito.Mockito; /** * Test class for @{@link OzoneClientProducer}. From 8a11cbca8b313ec1b68632f39eaa6337bae34050 Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 29 Aug 2023 16:49:28 -0700 Subject: [PATCH 04/15] HDDS-7035. Added licenses to new classes. 
--- .../hadoop/ozone/s3/AuthorizationFilter.java | 17 +++++++++++++++++ .../ozone/s3/TestAuthorizationFilter.java | 17 +++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index eb41aa663c8..515519b5194 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.ozone.s3; import javax.annotation.Priority; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java index 3e0b589ca99..26fbc0f09d9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.ozone.s3; import javax.ws.rs.WebApplicationException; From 5cae64b2f0c656ebf3791207eabf06dba498a540 Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 5 Sep 2023 19:00:02 -0700 Subject: [PATCH 05/15] HDDS-7035. Fixed acceptance test errors. 
--- hadoop-ozone/dist/src/main/compose/common/ec-test.sh | 3 ++- hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh | 5 +++-- .../dist/src/main/compose/ozone/disabled-test-s3-haproxy.sh | 3 ++- hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh | 2 +- hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh | 3 ++- hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot | 3 +-- .../java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java | 5 +++++ .../org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java | 5 +++++ .../main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java | 2 +- 9 files changed, 22 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh index cfc46f058ad..65a659563e5 100755 --- a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh @@ -17,7 +17,8 @@ start_docker_env 5 -execute_robot_test scm -v BUCKET:erasure s3 +## Exclude virtual-host tests. This is tested separately as it requires additional config. +execute_robot_test scm -v BUCKET:erasure --exclude virtual-host s3 prefix=${RANDOM} execute_robot_test scm -v PREFIX:${prefix} ec/basic.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh index 2d73133fdd9..08c85226961 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh @@ -35,11 +35,12 @@ execute_robot_test ${SCM} basic/links.robot execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ozonefs/ozonefs.robot -exclude="" +## Exclude virtual-host tests. This is tested separately as it requires additional config. 
+exclude="--exclude virtual-host" for bucket in generated; do execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once - exclude="--exclude no-bucket-type" + exclude="--exclude virtual-host --exclude no-bucket-type" done execute_robot_test ${SCM} freon diff --git a/hadoop-ozone/dist/src/main/compose/ozone/disabled-test-s3-haproxy.sh b/hadoop-ozone/dist/src/main/compose/ozone/disabled-test-s3-haproxy.sh index 9612b92da47..6cf3901b9d2 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/disabled-test-s3-haproxy.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/disabled-test-s3-haproxy.sh @@ -26,4 +26,5 @@ source "$COMPOSE_DIR/../testlib.sh" start_docker_env -execute_robot_test scm s3 +## Exclude virtual-host tests. This is tested separately as it requires additional config. +execute_robot_test scm --exclude virtual-host s3 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh index fc57e7a782e..f6ab69746c3 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh @@ -41,7 +41,7 @@ execute_robot_test s3g -v SCHEME:o3fs -v BUCKET_TYPE:link -N ozonefs-o3fs-link o execute_robot_test s3g basic/links.robot -## Exclude virtual-host.robot +## Exclude virtual-host tests. This is tested separately as it requires additional config. 
exclude="--exclude virtual-host" for bucket in encrypted link; do execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh index f0af40792d0..f2bc0948feb 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh @@ -28,4 +28,5 @@ export COMPOSE_FILE=docker-compose.yaml:vault.yaml start_docker_env -execute_robot_test scm s3 +## Exclude virtual-host tests. This is tested separately as it requires additional config. +execute_robot_test scm --exclude virtual-host s3 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index f0f617eb1b6..98691876e12 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -86,8 +86,7 @@ Setup secure v4 headers Execute aws configure set aws_access_key_id ${accessKey} Execute aws configure set aws_secret_access_key ${secret} Execute aws configure set region us-west-1 - Return From Keyword If '${OZONE_S3_ADDRESS_STYLE}' != 'virtual' - Execute aws configure set default.s3.addressing_style virtual + Execute aws configure set default.s3.addressing_style ${OZONE_S3_ADDRESS_STYLE} Setup dummy credentials for S3 diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index 515519b5194..16665fccd4b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -66,6 +66,11 @@ public class AuthorizationFilter implements ContainerRequestFilter { @Override public void 
filter(ContainerRequestContext context) throws IOException { + // Skip authentication if the uri is hitting S3Secret generation or + // revocation endpoint. + if (context.getUriInfo().getPath().startsWith("secret")) { + return; + } try { signatureInfo.initialize(signatureProcessor.parseSignature()); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index 81235fb9d56..bcce5ddf164 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -63,6 +63,11 @@ public class VirtualHostStyleFilter implements ContainerRequestFilter { @Override public void filter(ContainerRequestContext requestContext) throws IOException { + // Skip this filter if the uri is hitting S3Secret generation or + // revocation endpoint. + if (requestContext.getUriInfo().getPath().startsWith("secret")) { + return; + } domains = conf.getTrimmedStrings(OZONE_S3G_DOMAIN_NAME); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index e151c7b82c5..df3d01936b1 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -63,7 +63,7 @@ private S3Consts() { ".com/doc/2006-03-01/"; public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; - + public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; From 96cf32061ee69a295d356ca45efc091d0298242e Mon Sep 17 00:00:00 2001 From: saketa Date: Wed, 6 Sep 2023 16:35:17 -0700 Subject: [PATCH 06/15] HDDS-7035. Added more Unit tests. 
--- .../hadoop/ozone/s3/AuthorizationFilter.java | 2 +- .../ozone/s3/VirtualHostStyleFilter.java | 3 +- .../ozone/s3/TestAuthorizationFilter.java | 228 +++++++++++++++--- .../ozone/s3/TestVirtualHostStyleFilter.java | 23 ++ 4 files changed, 217 insertions(+), 39 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index 16665fccd4b..d49ff17f3bf 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -68,7 +68,7 @@ public void filter(ContainerRequestContext context) throws IOException { // Skip authentication if the uri is hitting S3Secret generation or // revocation endpoint. - if (context.getUriInfo().getPath().startsWith("secret")) { + if (context.getUriInfo().getRequestUri().getPath().startsWith("/secret")) { return; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index bcce5ddf164..32c1eb9eb23 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -65,7 +65,8 @@ public void filter(ContainerRequestContext requestContext) throws IOException { // Skip this filter if the uri is hitting S3Secret generation or // revocation endpoint. 
- if (requestContext.getUriInfo().getPath().startsWith("secret")) { + if (requestContext.getUriInfo().getRequestUri().getPath() + .startsWith("/secret")) { return; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java index 26fbc0f09d9..3bf3dcdd7bf 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java @@ -23,13 +23,22 @@ import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.UriInfo; import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.time.LocalDate; +import java.time.LocalDateTime; import java.util.stream.Stream; import org.apache.hadoop.ozone.s3.signature.AWSSignatureProcessor; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo; +import org.apache.hadoop.ozone.s3.signature.StringToSignProducer; +import org.apache.kerby.util.Hex; import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; +import static org.apache.hadoop.ozone.s3.signature.AWSSignatureProcessor.DATE_FORMATTER; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; import static org.apache.hadoop.ozone.s3.signature.SignatureParser.AUTHORIZATION_HEADER; @@ -41,7 +50,6 @@ import static org.junit.Assert.fail; import static org.junit.jupiter.params.provider.Arguments.arguments; -import org.apache.hadoop.ozone.s3.signature.SignatureInfo; import org.junit.Assert; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -59,9 +67,15 @@ public class TestAuthorizationFilter { 
private MultivaluedMap queryMap; private MultivaluedMap pathParamsMap; + private static final String DATETIME = StringToSignProducer.TIME_FORMATTER. + format(LocalDateTime.now()); + + private static final String CURDATE = DATE_FORMATTER.format(LocalDate.now()); + private static StreamtestAuthFilterFailuresInput() { return Stream.of( arguments( + "GET", "AWS4-HMAC-SHA256 Credential=testuser1/20190221/us-west-1/s3" + "/aws4_request, SignedHeaders=content-md5;host;" + "x-amz-content-sha256;x-amz-date, " + @@ -74,9 +88,11 @@ public class TestAuthorizationFilter { "1e28375860c6", "20190221T002037Z", "", - "/" + "/", + MALFORMED_HEADER.getErrorMessage() ), arguments( + "GET", "AWS4-HMAC-SHA256 " + "Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request," + " SignedHeaders=content-type;host;x-amz-date, " + @@ -88,57 +104,39 @@ public class TestAuthorizationFilter { "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "20150830T123600Z", "application/x-www-form-urlencoded; charset=utf-8", - "" + "", + MALFORMED_HEADER.getErrorMessage() ), - arguments(null, null, null, null, null, null, null), - arguments("", null, null, null, null, null, null), + arguments(null, null, null, null, null, null, null, null, + S3_AUTHINFO_CREATION_ERROR.getErrorMessage()), + arguments(null, "", null, null, null, null, null, null, + S3_AUTHINFO_CREATION_ERROR.getErrorMessage()), // AWS V2 signature arguments( + "GET", "AWS AKIDEXAMPLE:St7bHPOdkmsX/GITGe98rOQiUCg=", "", "s3g:9878", "", "Wed, 22 Mar 2023 17:00:06 +0000", "application/octet-stream", - "/" + "/", + S3_AUTHINFO_CREATION_ERROR.getErrorMessage() ) ); } + @SuppressWarnings("checkstyle:ParameterNumber") @ParameterizedTest @MethodSource("testAuthFilterFailuresInput") public void testAuthFilterFailures( - String authHeader, String contentMd5, + String method, String authHeader, String contentMd5, String host, String amzContentSha256, String date, String contentType, - String path + String path, String expectedErrorMsg ) 
{ - headerMap = new MultivaluedHashMap<>(); - queryMap = new MultivaluedHashMap<>(); - pathParamsMap = new MultivaluedHashMap<>(); try { - System.err.println("Testing: " + authHeader); - headerMap.putSingle(AUTHORIZATION_HEADER, authHeader); - headerMap.putSingle(CONTENT_MD5, contentMd5); - headerMap.putSingle(HOST_HEADER, host); - headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256); - headerMap.putSingle(X_AMAZ_DATE, date); - headerMap.putSingle(CONTENT_TYPE, contentType); - - UriInfo uriInfo = Mockito.mock(UriInfo.class); - ContainerRequestContext context = Mockito.mock( - ContainerRequestContext.class); - Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); - Mockito.when(uriInfo.getRequestUri()).thenReturn( - new URI("http://" + host + path)); - - Mockito.when(context.getUriInfo()).thenReturn(uriInfo); - Mockito.when(context.getHeaders()).thenReturn(headerMap); - Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER)) - .thenReturn(authHeader); - Mockito.when(context.getUriInfo().getQueryParameters()) - .thenReturn(queryMap); - Mockito.when(context.getUriInfo().getPathParameters()) - .thenReturn(pathParamsMap); + ContainerRequestContext context = setupContext(method, authHeader, + contentMd5, host, amzContentSha256, date, contentType, path); AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor(); awsSignatureProcessor.setContext(context); @@ -158,13 +156,13 @@ public void testAuthFilterFailures( // Empty auth header and unsupported AWS signature // should fail with Invalid Request. Assert.assertEquals(HTTP_FORBIDDEN, ex.getResponse().getStatus()); - Assert.assertEquals(S3_AUTHINFO_CREATION_ERROR.getErrorMessage(), + Assert.assertEquals(expectedErrorMsg, ex.getMessage()); } else { // Other requests have stale timestamp and // should fail with Malformed Authorization Header. 
Assert.assertEquals(HTTP_BAD_REQUEST, ex.getResponse().getStatus()); - Assert.assertEquals(MALFORMED_HEADER.getErrorMessage(), + Assert.assertEquals(expectedErrorMsg, ex.getMessage()); } @@ -174,6 +172,162 @@ public void testAuthFilterFailures( } } - //testStrToSign generation + private static StreamtestAuthFilterInput() { + return Stream.of( + // Path style URI + arguments( + "GET", + "AWS4-HMAC-SHA256 Credential=testuser1/" + CURDATE + + "/us-east-1/s3/aws4_request, " + + "SignedHeaders=host;x-amz-content-sha256;" + + "x-amz-date, " + + "Signature" + + "=56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf47" + + "65f46a14cd745ad", + "Content-SHA", + "s3g:9878", + "Content-SHA", + DATETIME, + "", + "/bucket1/key1" + ), + // Virtual style URI + arguments( + "GET", + "AWS4-HMAC-SHA256 Credential=testuser1/" + CURDATE + + "/us-east-1/s3/aws4_request, " + + "SignedHeaders=host;x-amz-content-sha256;" + + "x-amz-date, " + + "Signature" + + "=56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf47" + + "65f46a14cd745ad", + "Content-SHA", + "bucket1.s3g.internal:9878", + "Content-SHA", + DATETIME, + "", + "/key1" + ), + // S3 secret generation endpoint + arguments( + "POST", + null, + null, + "s3g:9878", + null, + null, + "", + "/secret/generate" + ), + // S3 secret generation endpoint + arguments( + "POST", + null, + null, + "s3g:9878", + null, + null, + "", + "/secret/revoke" + ) + ); + } + + @SuppressWarnings("checkstyle:ParameterNumber") + @ParameterizedTest + @MethodSource("testAuthFilterInput") + public void testAuthFilter( + String method, String authHeader, String contentMd5, + String host, String amzContentSha256, String date, String contentType, + String path + ) { + try { + ContainerRequestContext context = setupContext(method, authHeader, + contentMd5, host, amzContentSha256, date, contentType, path); + + AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor(); + awsSignatureProcessor.setContext(context); + + SignatureInfo signatureInfo = new 
SignatureInfo(); + + authorizationFilter.setSignatureParser(awsSignatureProcessor); + authorizationFilter.setSignatureInfo(signatureInfo); + + authorizationFilter.filter(context); + + if (path.startsWith("/secret")) { + Assert.assertNull( + authorizationFilter.getSignatureInfo().getUnfilteredURI()); + + Assert.assertNull( + authorizationFilter.getSignatureInfo().getStringToSign()); + } else { + String canonicalRequest = method + "\n" + + path + "\n" + + "\n" + + "host:" + host + "\nx-amz-content-sha256:" + amzContentSha256 + + "\n" + + "x-amz-date:" + DATETIME + "\n" + + "\n" + + "host;x-amz-content-sha256;x-amz-date\n" + + amzContentSha256; + + MessageDigest md = MessageDigest.getInstance("SHA-256"); + md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8)); + + String expectedStrToSign = "AWS4-HMAC-SHA256\n" + + DATETIME + "\n" + + CURDATE + "/us-east-1/s3/aws4_request\n" + + Hex.encode(md.digest()).toLowerCase(); + + Assert.assertEquals("Unfiltered URI is not preserved", + path, + authorizationFilter.getSignatureInfo().getUnfilteredURI()); + + Assert.assertEquals("String to sign is invalid", + expectedStrToSign, + authorizationFilter.getSignatureInfo().getStringToSign()); + } + } catch (Exception ex) { + fail("Unexpected exception: " + ex); + } + } + + @SuppressWarnings("checkstyle:ParameterNumber") + private ContainerRequestContext setupContext( + String method, String authHeader, String contentMd5, + String host, String amzContentSha256, String date, String contentType, + String path) throws URISyntaxException { + headerMap = new MultivaluedHashMap<>(); + queryMap = new MultivaluedHashMap<>(); + pathParamsMap = new MultivaluedHashMap<>(); + + System.err.println("Testing: " + authHeader); + headerMap.putSingle(AUTHORIZATION_HEADER, authHeader); + headerMap.putSingle(CONTENT_MD5, contentMd5); + headerMap.putSingle(HOST_HEADER, host); + headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256); + headerMap.putSingle(X_AMAZ_DATE, date); + 
headerMap.putSingle(CONTENT_TYPE, contentType); + + UriInfo uriInfo = Mockito.mock(UriInfo.class); + ContainerRequestContext context = Mockito.mock( + ContainerRequestContext.class); + Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); + Mockito.when(uriInfo.getRequestUri()).thenReturn( + new URI("http://" + host + path)); + + Mockito.when(context.getMethod()).thenReturn(method); + Mockito.when(context.getUriInfo()).thenReturn(uriInfo); + Mockito.when(context.getHeaders()).thenReturn(headerMap); + Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER)) + .thenReturn(authHeader); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(queryMap); + Mockito.when(context.getUriInfo().getPathParameters()) + .thenReturn(pathParamsMap); + + return context; + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java index d4f4e794a02..81b8e0c5037 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java @@ -125,6 +125,29 @@ public void testPathStyle() throws Exception { } + @Test + public void testS3SecretEndpoint() throws Exception { + + VirtualHostStyleFilter virtualHostStyleFilter = + new VirtualHostStyleFilter(); + virtualHostStyleFilter.setConfiguration(conf); + + ContainerRequest containerRequest = createContainerRequest("mybucket" + + ".localhost:9878", "/secret/generate", + null, true); + virtualHostStyleFilter.filter(containerRequest); + URI expected = new URI("http://" + s3HttpAddr + "/secret/generate"); + Assert.assertEquals(expected, containerRequest.getRequestUri()); + + containerRequest = createContainerRequest("mybucket" + + ".localhost:9878", "/secret/revoke", + null, true); + 
virtualHostStyleFilter.filter(containerRequest); + expected = new URI("http://" + s3HttpAddr + "/secret/revoke"); + Assert.assertEquals(expected, containerRequest.getRequestUri()); + + } + @Test public void testVirtualHostStyleWithCreateBucketRequest() throws Exception { From 349e37401923f3439876e15ef5d694a1c6015cce Mon Sep 17 00:00:00 2001 From: saketa Date: Fri, 1 Mar 2024 08:20:36 -0800 Subject: [PATCH 07/15] HDDS-5865. Made read retry max attempts and interval conigurable. --- .../hadoop/hdds/scm/OzoneClientConfig.java | 33 +++++++++++++++++++ .../hdds/scm/storage/BlockInputStream.java | 7 ++-- .../hdds/scm/storage/ExtendedInputStream.java | 15 +++++++++ .../scm/storage/TestBlockInputStream.java | 4 +++ .../hadoop/ozone/client/rpc/RpcClient.java | 1 + 5 files changed, 58 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 3042b4d847a..d484f665f5e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -144,6 +144,23 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private int retryInterval = 0; + @Config(key = "max.read.retries", + defaultValue = "3", + description = "Maximum number of retries by Ozone Client on " + + "encountering connectivity exception when reading a key.", + tags = ConfigTag.CLIENT) + private int maxReadRetryCount = 3; + + @Config(key = "read.retry.interval", + defaultValue = "0", + description = + "Indicates the time duration in milliseconds a client will wait " + + "before retrying a read key request on encountering " + + "a connectivity excepetion from Datanodes . 
" + + "By default the interval is 1millisecond", + tags = ConfigTag.CLIENT) + private int readRetryInterval = 1; + @Config(key = "checksum.type", defaultValue = "CRC32", description = "The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] " @@ -326,6 +343,22 @@ public void setRetryInterval(int retryInterval) { this.retryInterval = retryInterval; } + public int getMaxReadRetryCount() { + return maxReadRetryCount; + } + + public void setMaxReadRetryCount(int maxReadRetryCount) { + this.maxReadRetryCount = maxReadRetryCount; + } + + public int getReadRetryInterval() { + return readRetryInterval; + } + + public void setReadRetryInterval(int readRetryInterval) { + this.readRetryInterval = readRetryInterval; + } + public ChecksumType getChecksumType() { return ChecksumType.valueOf(checksumType); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 1fb4bf954c7..12d7c9a7c42 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -76,8 +76,11 @@ public class BlockInputStream extends BlockExtendedInputStream { private XceiverClientSpi xceiverClient; private boolean initialized = false; // TODO: do we need to change retrypolicy based on exception. - private final RetryPolicy retryPolicy = - HddsClientUtils.createRetryPolicy(3, TimeUnit.SECONDS.toMillis(1)); + // Default retry, retry for DN connectivity issues & what else? 
+ private final RetryPolicy retryPolicy = getRetryPolicy(); + //= + // HddsClientUtils.createRetryPolicy(3, TimeUnit.SECONDS.toMillis(1)); + private int retries; // List of ChunkInputStreams, one for each chunk in the block diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index 3e78abbf485..d8c93eaa6b6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -22,11 +22,15 @@ import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.fs.StreamCapabilities; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.util.StringUtils; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; /** * Abstact class which extends InputStream and some common interfaces used by @@ -36,6 +40,7 @@ public abstract class ExtendedInputStream extends InputStream implements Seekable, CanUnbuffer, ByteBufferReadable, StreamCapabilities { protected static final int EOF = -1; + protected static RetryPolicy retryPolicy; @Override public synchronized int read() throws IOException { @@ -101,4 +106,14 @@ public boolean hasCapability(String capability) { return false; } } + + public static void setRetryPolicy(OzoneClientConfig config) { + retryPolicy = + HddsClientUtils.createRetryPolicy(config.getMaxReadRetryCount(), + TimeUnit.SECONDS.toMillis(config.getReadRetryInterval())); + } + + public static RetryPolicy getRetryPolicy() { + return retryPolicy; + } } diff --git 
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 7755adc5f33..b6d3a4cedfa 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -22,8 +22,10 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -96,6 +98,8 @@ public void setup() throws Exception { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); blockStream = new DummyBlockInputStream(blockID, blockSize, pipeline, null, false, null, refreshFunction, chunks, chunkDataMap); + BlockInputStream.setRetryPolicy(new OzoneConfiguration() + .getObject(OzoneClientConfig.class)); } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index bcb08f1d913..8e3f063885d 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2259,6 +2259,7 @@ private OzoneInputStream createInputStream( // When Key is not MPU or when Key is MPU and encryption is not enabled // Need to revisit for GDP. 
FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo(); + KeyInputStream.setRetryPolicy(clientConfig); if (feInfo == null) { LengthInputStream lengthInputStream = KeyInputStream From d836409c412af7eef81ee5c83681bf3815729546 Mon Sep 17 00:00:00 2001 From: saketa Date: Fri, 1 Mar 2024 08:32:13 -0800 Subject: [PATCH 08/15] HDDS-5865. Fixed checkstyle errors. --- .../java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java | 4 ++-- .../org/apache/hadoop/hdds/scm/storage/BlockInputStream.java | 2 -- .../apache/hadoop/hdds/scm/storage/ExtendedInputStream.java | 4 ++-- .../java/org/apache/hadoop/ozone/client/rpc/RpcClient.java | 1 + 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index d484f665f5e..32ba6081e4f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -152,9 +152,9 @@ public enum ChecksumCombineMode { private int maxReadRetryCount = 3; @Config(key = "read.retry.interval", - defaultValue = "0", + defaultValue = "1", description = - "Indicates the time duration in milliseconds a client will wait " + "Indicates the time duration in seconds a client will wait " + "before retrying a read key request on encountering " + "a connectivity excepetion from Datanodes . 
" + "By default the interval is 1millisecond", diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 12d7c9a7c42..0c2f7d56abc 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -39,7 +38,6 @@ import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi.Validator; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index d8c93eaa6b6..2fc88ddc548 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -40,7 +40,7 @@ public abstract class ExtendedInputStream extends InputStream implements Seekable, CanUnbuffer, ByteBufferReadable, StreamCapabilities { protected static final int EOF = -1; - protected static RetryPolicy retryPolicy; + private static RetryPolicy retryPolicy; @Override public synchronized int read() throws IOException { @@ -114,6 +114,6 @@ public static void setRetryPolicy(OzoneClientConfig 
config) { } public static RetryPolicy getRetryPolicy() { - return retryPolicy; + return retryPolicy; } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 8e3f063885d..7b4bafa30da 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2259,6 +2259,7 @@ private OzoneInputStream createInputStream( // When Key is not MPU or when Key is MPU and encryption is not enabled // Need to revisit for GDP. FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo(); + // Set read retry policy for the streams KeyInputStream.setRetryPolicy(clientConfig); if (feInfo == null) { From 59b2fc8326cba0b9a6fc32427d1edc9108fd2ff0 Mon Sep 17 00:00:00 2001 From: saketa Date: Fri, 1 Mar 2024 12:01:29 -0800 Subject: [PATCH 09/15] HDDS-5865. Fixed integration test errors. 
--- .../src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index a04c1236186..59dac617664 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -138,6 +139,7 @@ public static void init() throws Exception { startCluster(); client = cluster.newClient(); createTestData(); + KeyInputStream.setRetryPolicy(conf.getObject(OzoneClientConfig.class)); } private static void createTestData() throws IOException { From 27752aeda74b824fb07057efd027c2999bf35d0d Mon Sep 17 00:00:00 2001 From: saketa Date: Mon, 4 Mar 2024 08:23:34 -0800 Subject: [PATCH 10/15] HDDS-5865. Removed unnecessary comment. 
--- .../org/apache/hadoop/hdds/scm/storage/BlockInputStream.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 0c2f7d56abc..6e9d27ecb7d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -76,8 +76,6 @@ public class BlockInputStream extends BlockExtendedInputStream { // TODO: do we need to change retrypolicy based on exception. // Default retry, retry for DN connectivity issues & what else? private final RetryPolicy retryPolicy = getRetryPolicy(); - //= - // HddsClientUtils.createRetryPolicy(3, TimeUnit.SECONDS.toMillis(1)); private int retries; From 037689f66932602cfbd8ebb7dae1f118a37fa2fb Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 5 Mar 2024 10:20:54 -0800 Subject: [PATCH 11/15] HDDS-5865. Removed unnecessary comments. --- .../org/apache/hadoop/hdds/scm/storage/BlockInputStream.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 6e9d27ecb7d..73d9df913b9 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -74,7 +74,6 @@ public class BlockInputStream extends BlockExtendedInputStream { private XceiverClientSpi xceiverClient; private boolean initialized = false; // TODO: do we need to change retrypolicy based on exception. - // Default retry, retry for DN connectivity issues & what else? 
private final RetryPolicy retryPolicy = getRetryPolicy(); private int retries; From 50eb1a506ebbfcc1af35044ad9210b8b7287e36b Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 19 Mar 2024 12:32:23 -0700 Subject: [PATCH 12/15] HDDS-5865. Made read retry max attempts and interval configurable and modifies InputStream classes to access client configuration. --- .../hdds/scm/storage/BlockInputStream.java | 16 +++-- .../hdds/scm/storage/ExtendedInputStream.java | 14 ---- .../client/io/BlockInputStreamFactory.java | 4 +- .../io/BlockInputStreamFactoryImpl.java | 9 ++- .../ozone/client/io/ECBlockInputStream.java | 10 ++- .../client/io/ECBlockInputStreamFactory.java | 4 +- .../io/ECBlockInputStreamFactoryImpl.java | 9 ++- .../client/io/ECBlockInputStreamProxy.java | 8 ++- ...ECBlockReconstructedStripeInputStream.java | 6 +- .../scm/storage/DummyBlockInputStream.java | 6 +- .../DummyBlockInputStreamWithRetry.java | 6 +- .../scm/storage/TestBlockInputStream.java | 16 +++-- .../ozone/client/io/ECStreamTestUtil.java | 4 +- .../io/TestBlockInputStreamFactoryImpl.java | 10 ++- .../client/io/TestECBlockInputStream.java | 72 +++++++++++++------ .../io/TestECBlockInputStreamProxy.java | 9 ++- .../TestECBlockReconstructedInputStream.java | 6 +- ...ECBlockReconstructedStripeInputStream.java | 8 ++- .../ECReconstructionCoordinator.java | 3 +- .../ozone/client/io/KeyInputStream.java | 23 +++--- .../hadoop/ozone/client/rpc/RpcClient.java | 8 +-- .../ozone/client/io/TestKeyInputStreamEC.java | 9 ++- .../apache/hadoop/ozone/TestBlockTokens.java | 4 +- .../hadoop/ozone/om/TestChunkStreams.java | 7 +- 24 files changed, 178 insertions(+), 93 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 73d9df913b9..67ed04cb66a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -35,9 +36,11 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi.Validator; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; @@ -74,7 +77,7 @@ public class BlockInputStream extends BlockExtendedInputStream { private XceiverClientSpi xceiverClient; private boolean initialized = false; // TODO: do we need to change retrypolicy based on exception. 
- private final RetryPolicy retryPolicy = getRetryPolicy(); + private final RetryPolicy retryPolicy; private int retries; @@ -112,7 +115,8 @@ public class BlockInputStream extends BlockExtendedInputStream { public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, boolean verifyChecksum, XceiverClientFactory xceiverClientFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { this.blockID = blockId; this.length = blockLen; setPipeline(pipeline); @@ -120,15 +124,19 @@ public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, this.verifyChecksum = verifyChecksum; this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; + this.retryPolicy = + HddsClientUtils.createRetryPolicy(config.getMaxReadRetryCount(), + TimeUnit.SECONDS.toMillis(config.getReadRetryInterval())); } public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, boolean verifyChecksum, - XceiverClientFactory xceiverClientFactory + XceiverClientFactory xceiverClientFactory, + OzoneClientConfig config ) { this(blockId, blockLen, pipeline, token, verifyChecksum, - xceiverClientFactory, null); + xceiverClientFactory, null, config); } /** * Initialize the BlockInputStream. 
Get the BlockData (list of chunks) from diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index 2fc88ddc548..15ca88baf0f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -22,15 +22,11 @@ import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.fs.StreamCapabilities; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.util.StringUtils; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; -import java.util.concurrent.TimeUnit; /** * Abstact class which extends InputStream and some common interfaces used by @@ -40,7 +36,6 @@ public abstract class ExtendedInputStream extends InputStream implements Seekable, CanUnbuffer, ByteBufferReadable, StreamCapabilities { protected static final int EOF = -1; - private static RetryPolicy retryPolicy; @Override public synchronized int read() throws IOException { @@ -107,13 +102,4 @@ public boolean hasCapability(String capability) { } } - public static void setRetryPolicy(OzoneClientConfig config) { - retryPolicy = - HddsClientUtils.createRetryPolicy(config.getMaxReadRetryCount(), - TimeUnit.SECONDS.toMillis(config.getReadRetryInterval())); - } - - public static RetryPolicy getRetryPolicy() { - return retryPolicy; - } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index bd100214ae4..bf8a6e3a2d1 100644 --- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -50,6 +51,7 @@ BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction); + Function refreshFunction, + OzoneClientConfig config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index 40063f9ce49..430f577208f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -78,14 +79,16 @@ public BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + 
OzoneClientConfig config) { if (repConfig.getReplicationType().equals(HddsProtos.ReplicationType.EC)) { return new ECBlockInputStreamProxy((ECReplicationConfig)repConfig, blockInfo, verifyChecksum, xceiverFactory, refreshFunction, - ecBlockStreamFactory); + ecBlockStreamFactory, config); } else { return new BlockInputStream(blockInfo.getBlockID(), blockInfo.getLength(), - pipeline, token, verifyChecksum, xceiverFactory, refreshFunction); + pipeline, token, verifyChecksum, xceiverFactory, refreshFunction, + config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index e85bf27d530..fc17a776c72 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -75,7 +76,7 @@ public class ECBlockInputStream extends BlockExtendedInputStream { private long position = 0; private boolean closed = false; private boolean seeked = false; - + private OzoneClientConfig config; protected ECReplicationConfig getRepConfig() { return repConfig; } @@ -111,7 +112,8 @@ public ECBlockInputStream(ECReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, XceiverClientFactory xceiverClientFactory, Function refreshFunction, - BlockInputStreamFactory streamFactory) { + BlockInputStreamFactory streamFactory, + OzoneClientConfig config) { this.repConfig = repConfig; 
this.ecChunkSize = repConfig.getEcChunkSize(); this.verifyChecksum = verifyChecksum; @@ -123,6 +125,7 @@ public ECBlockInputStream(ECReplicationConfig repConfig, this.dataLocations = new DatanodeDetails[repConfig.getRequiredNodes()]; this.blockStreams = new BlockExtendedInputStream[repConfig.getRequiredNodes()]; + this.config = config; this.stripeSize = (long)ecChunkSize * repConfig.getData(); setBlockLocations(this.blockInfo.getPipeline()); @@ -192,7 +195,8 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) { HddsProtos.ReplicationFactor.ONE), blkInfo, pipeline, blockInfo.getToken(), verifyChecksum, xceiverClientFactory, - ecPipelineRefreshFunction(locationIndex + 1, refreshFunction)); + ecPipelineRefreshFunction(locationIndex + 1, refreshFunction), + config); blockStreams[locationIndex] = stream; LOG.debug("{}: created stream [{}]: {}", this, locationIndex, stream); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 0e2ef22c1e9..382104441d8 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -53,5 +54,6 @@ BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, XceiverClientFactory xceiverFactory, - 
Function refreshFunction); + Function refreshFunction, + OzoneClientConfig config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 36b6539ea81..4969d5a2b32 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -76,14 +77,15 @@ public BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { if (missingLocations) { // We create the reconstruction reader ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( (ECReplicationConfig)repConfig, blockInfo, verifyChecksum, xceiverFactory, refreshFunction, inputStreamFactory, - byteBufferPool, ecReconstructExecutorSupplier.get()); + byteBufferPool, ecReconstructExecutorSupplier.get(), config); if (failedLocations != null) { sis.addFailedDatanodes(failedLocations); } @@ -92,7 +94,8 @@ public BlockExtendedInputStream create(boolean missingLocations, } else { // Otherwise create the more efficient non-reconstruction reader return new ECBlockInputStream((ECReplicationConfig)repConfig, blockInfo, - 
verifyChecksum, xceiverFactory, refreshFunction, inputStreamFactory); + verifyChecksum, xceiverFactory, refreshFunction, inputStreamFactory, + config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java index 973561616f7..517ec249fcd 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -59,6 +60,7 @@ public class ECBlockInputStreamProxy extends BlockExtendedInputStream { private boolean reconstructionReader = false; private List failedLocations = new ArrayList<>(); private boolean closed = false; + private OzoneClientConfig config; /** * Given the ECReplicationConfig and the block length, calculate how many @@ -100,13 +102,15 @@ public ECBlockInputStreamProxy(ECReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, XceiverClientFactory xceiverClientFactory, Function refreshFunction, - ECBlockInputStreamFactory streamFactory) { + ECBlockInputStreamFactory streamFactory, + OzoneClientConfig config) { this.repConfig = repConfig; this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.ecBlockInputStreamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; + this.config = config; setReaderType(); createBlockReader(); @@ -125,7 +129,7 @@ private void 
createBlockReader() { } blockReader = ecBlockInputStreamFactory.create(reconstructionReader, failedLocations, repConfig, blockInfo, verifyChecksum, - xceiverClientFactory, refreshFunction); + xceiverClientFactory, refreshFunction, config); } @Override diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 142825cb120..015df017be2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -157,9 +158,10 @@ public ECBlockReconstructedStripeInputStream(ECReplicationConfig repConfig, Function refreshFunction, BlockInputStreamFactory streamFactory, ByteBufferPool byteBufferPool, - ExecutorService ecReconstructExecutor) { + ExecutorService ecReconstructExecutor, + OzoneClientConfig config) { super(repConfig, blockInfo, verifyChecksum, xceiverClientFactory, - refreshFunction, streamFactory); + refreshFunction, streamFactory, config); this.byteBufferPool = byteBufferPool; this.executor = ecReconstructExecutor; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java index 3e7779f0d10..bf46bf97560 100644 --- 
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; @@ -48,9 +49,10 @@ class DummyBlockInputStream extends BlockInputStream { XceiverClientFactory xceiverClientManager, Function refreshFunction, List chunkList, - Map chunks) { + Map chunks, + OzoneClientConfig config) { super(blockId, blockLen, pipeline, token, verifyChecksum, - xceiverClientManager, refreshFunction); + xceiverClientManager, refreshFunction, config); this.chunkDataMap = chunks; this.chunks = chunkList; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java index 24a35745144..7c968c2ffff 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; @@ -55,7 +56,8 @@ final class DummyBlockInputStreamWithRetry XceiverClientFactory xceiverClientManager, List chunkList, 
Map chunkMap, - AtomicBoolean isRerfreshed, IOException ioException) { + AtomicBoolean isRerfreshed, IOException ioException, + OzoneClientConfig config) { super(blockId, blockLen, pipeline, token, verifyChecksum, xceiverClientManager, blockID -> { isRerfreshed.set(true); @@ -68,7 +70,7 @@ final class DummyBlockInputStreamWithRetry throw new RuntimeException(e); } - }, chunkList, chunkMap); + }, chunkList, chunkMap, config); this.ioException = ioException; } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index b6d3a4cedfa..87f144d59a3 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -87,6 +87,8 @@ public class TestBlockInputStream { private Function refreshFunction; + private OzoneConfiguration conf = new OzoneConfiguration(); + @BeforeEach @SuppressWarnings("unchecked") public void setup() throws Exception { @@ -97,9 +99,8 @@ public void setup() throws Exception { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); blockStream = new DummyBlockInputStream(blockID, blockSize, pipeline, null, - false, null, refreshFunction, chunks, chunkDataMap); - BlockInputStream.setRetryPolicy(new OzoneConfiguration() - .getObject(OzoneClientConfig.class)); + false, null, refreshFunction, chunks, chunkDataMap, + conf.getObject(OzoneClientConfig.class)); } /** @@ -267,7 +268,8 @@ public void testRefreshPipelineFunction() throws Exception { try (BlockInputStream blockInputStreamWithRetry = new DummyBlockInputStreamWithRetry(blockID, blockSize, MockPipeline.createSingleNodePipeline(), null, - false, null, chunks, chunkDataMap, isRefreshed, null)) { + false, null, chunks, chunkDataMap, isRefreshed, null, + conf.getObject(OzoneClientConfig.class))) { 
assertFalse(isRefreshed.get()); seekAndVerify(50); byte[] b = new byte[200]; @@ -352,7 +354,8 @@ private static ChunkInputStream throwingChunkInputStream(IOException ex, private BlockInputStream createSubject(BlockID blockID, Pipeline pipeline, ChunkInputStream stream) { return new DummyBlockInputStream(blockID, blockSize, pipeline, null, false, - null, refreshFunction, chunks, null) { + null, refreshFunction, chunks, null, + conf.getObject(OzoneClientConfig.class)) { @Override protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; @@ -405,7 +408,8 @@ public void testRefreshOnReadFailureAfterUnbuffer(IOException ex) when(blockLocationInfo.getPipeline()).thenReturn(newPipeline); BlockInputStream subject = new BlockInputStream(blockID, blockSize, - pipeline, null, false, clientFactory, refreshFunction) { + pipeline, null, false, clientFactory, refreshFunction, + conf.getObject(OzoneClientConfig.class)) { @Override protected List getChunkInfoListUsingClient() { return chunks; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java index 41bf46a8ea2..28a8401994a 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -260,7 +261,8 @@ public synchronized BlockExtendedInputStream create( BlockLocationInfo blockInfo, Pipeline pipeline, Token token, boolean 
verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { int repInd = currentPipeline.getReplicaIndex(pipeline.getNodes().get(0)); TestBlockInputStream stream = new TestBlockInputStream( diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java index cf3f4f13ef9..6d35cea1d7a 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java @@ -21,9 +21,11 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -43,6 +45,8 @@ */ public class TestBlockInputStreamFactoryImpl { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testNonECGivesBlockInputStream() { BlockInputStreamFactory factory = new BlockInputStreamFactoryImpl(); @@ -54,7 +58,8 @@ public void testNonECGivesBlockInputStream() { BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null); + blockInfo.getToken(), true, null, null, + conf.getObject(OzoneClientConfig.class)); assertInstanceOf(BlockInputStream.class, stream); 
assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); @@ -71,7 +76,8 @@ public void testECGivesECBlockInputStream() { BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null); + blockInfo.getToken(), true, null, null, + conf.getObject(OzoneClientConfig.class)); assertInstanceOf(ECBlockInputStreamProxy.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java index bd34e7546c1..356f9e27b6b 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java @@ -20,9 +20,11 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -59,6 +61,7 @@ public class TestECBlockInputStream { private ECReplicationConfig repConfig; private TestBlockInputStreamFactory streamFactory; + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() { @@ -73,14 +76,16 @@ public void testSufficientLocations() { BlockLocationInfo keyInfo = ECStreamTestUtil 
.createKeyInfo(repConfig, 5, 5 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, true, null, null, new TestBlockInputStreamFactory(), + conf.getObject(OzoneClientConfig.class))) { assertTrue(ecb.hasSufficientLocations()); } // EC-3-2, very large block, so all 3 data locations are needed keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5000 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, true, null, null, new TestBlockInputStreamFactory(), + conf.getObject(OzoneClientConfig.class))) { assertTrue(ecb.hasSufficientLocations()); } @@ -90,7 +95,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB - 1, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, true, null, null, new TestBlockInputStreamFactory(), + conf.getObject(OzoneClientConfig.class))) { assertTrue(ecb.hasSufficientLocations()); } @@ -100,7 +106,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, true, null, null, new TestBlockInputStreamFactory(), + conf.getObject(OzoneClientConfig.class))) { assertFalse(ecb.hasSufficientLocations()); } @@ -112,7 +119,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 5); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new 
ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, true, null, null, new TestBlockInputStreamFactory(), + conf.getObject(OzoneClientConfig.class))) { assertFalse(ecb.hasSufficientLocations()); } } @@ -125,7 +133,8 @@ public void testCorrectBlockSizePassedToBlockStreamLessThanCell() ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB - 100); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.read(buf); // We expect only 1 block stream and it should have a length passed of // ONEMB - 100. @@ -142,7 +151,8 @@ public void testCorrectBlockSizePassedToBlockStreamTwoCells() ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB + 100); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -158,7 +168,8 @@ public void testCorrectBlockSizePassedToBlockStreamThreeCells() ECStreamTestUtil.createKeyInfo(repConfig, 5, 2 * ONEMB + 100); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -175,7 +186,8 @@ public void testCorrectBlockSizePassedToBlockStreamThreeFullAndPartialStripe() ECStreamTestUtil.createKeyInfo(repConfig, 5, 10 * ONEMB + 100); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.read(buf); List 
streams = streamFactory.getBlockStreams(); assertEquals(4 * ONEMB, streams.get(0).getLength()); @@ -192,7 +204,8 @@ public void testCorrectBlockSizePassedToBlockStreamSingleFullCell() ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -207,7 +220,8 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() ECStreamTestUtil.createKeyInfo(repConfig, 5, 9 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(3 * ONEMB, streams.get(0).getLength()); @@ -221,7 +235,8 @@ public void testSimpleRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -244,7 +259,8 @@ public void testSimpleReadUnderOneChunk() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1, ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -263,7 +279,8 @@ public void testReadPastEOF() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 50); try (ECBlockInputStream ecb = 
new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -282,7 +299,8 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { // EC Chunk size is 100 and 3-2. Create a byte buffer to read 3.5 chunks, // so 350 @@ -317,7 +335,8 @@ public void testSeekPastBlockLength() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { assertThrows(EOFException.class, () -> ecb.seek(1000)); } } @@ -329,7 +348,8 @@ public void testSeekToLength() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { // When seek more than the length, should throw EOFException. 
assertThrows(EOFException.class, () -> ecb.seek(101)); } @@ -342,7 +362,8 @@ public void testSeekToLengthZeroLengthBlock() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 0); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.seek(0); assertEquals(0, ecb.getPos()); assertEquals(0, ecb.getRemaining()); @@ -356,7 +377,8 @@ public void testSeekToValidPosition() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { ecb.seek(ONEMB - 1); assertEquals(ONEMB - 1, ecb.getPos()); assertEquals(ONEMB * 4 + 1, ecb.getRemaining()); @@ -385,7 +407,8 @@ public void testErrorReadingBlockReportsBadLocation() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -416,7 +439,8 @@ public void testNoErrorIfSpareLocationToRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 8 * ONEMB, datanodes); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); 
@@ -480,7 +504,8 @@ public void testEcPipelineRefreshFunction() { }; try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class))) { Pipeline pipeline = ecb.ecPipelineRefreshFunction(3, refreshFunction) .apply(blockID) @@ -514,7 +539,8 @@ public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { TestBlockInputStream stream = new TestBlockInputStream( blockInfo.getBlockID(), blockInfo.getLength(), (byte)blockStreams.size()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java index 97bf71c204a..24e4272538e 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java @@ -20,7 +20,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -52,6 +54,7 @@ public class TestECBlockInputStreamProxy { private long randomSeed; private ThreadLocalRandom random = ThreadLocalRandom.current(); private SplittableRandom dataGenerator; + private 
OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() { @@ -343,7 +346,8 @@ private void resetAndAdvanceDataGenerator(long position) { private ECBlockInputStreamProxy createBISProxy(ECReplicationConfig rConfig, BlockLocationInfo blockInfo) { return new ECBlockInputStreamProxy( - rConfig, blockInfo, true, null, null, streamFactory); + rConfig, blockInfo, true, null, null, streamFactory, + conf.getObject(OzoneClientConfig.class)); } private static class TestECBlockInputStreamFactory @@ -373,7 +377,8 @@ public BlockExtendedInputStream create(boolean missingLocations, List failedDatanodes, ReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { this.failedLocations = failedDatanodes; ByteBuffer wrappedBuffer = ByteBuffer.wrap(data.array(), 0, data.capacity()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java index 0425f6943a4..7e67b26faec 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java @@ -19,7 +19,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; @@ -54,6 +56,7 @@ public class TestECBlockReconstructedInputStream { private ByteBufferPool bufferPool = new 
ElasticByteBufferPool(); private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() throws IOException { @@ -75,7 +78,8 @@ private ECBlockReconstructedStripeInputStream createStripeInputStream( ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); streamFactory.setCurrentPipeline(keyInfo.getPipeline()); return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, - null, null, streamFactory, bufferPool, ecReconstructExecutor); + null, null, streamFactory, bufferPool, ecReconstructExecutor, + conf.getObject(OzoneClientConfig.class)); } @Test diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index f7a4bb0643e..a1fc26ce6e4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -20,8 +20,10 @@ import com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; @@ -73,7 +75,8 @@ public class TestECBlockReconstructedStripeInputStream { private ByteBufferPool bufferPool = new ElasticByteBufferPool(); private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); - + private 
OzoneConfiguration conf = new OzoneConfiguration(); +; static List> recoveryCases() { // TODO better name List> params = new ArrayList<>(); params.add(emptySet()); // non-recovery @@ -809,7 +812,8 @@ public void testFailedLocationsAreNotRead() throws IOException { private ECBlockReconstructedStripeInputStream createInputStream( BlockLocationInfo keyInfo) { return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, - null, null, streamFactory, bufferPool, ecReconstructExecutor); + null, null, streamFactory, bufferPool, ecReconstructExecutor, + conf.getObject(OzoneClientConfig.class)); } private void addDataStreamsToFactory(ByteBuffer[] data, ByteBuffer[] parity) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index a45c1584484..25d8cb7deab 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -273,7 +273,8 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, repConfig, blockLocationInfo, true, this.containerOperationClient.getXceiverClientManager(), null, this.blockInputStreamFactory, byteBufferPool, - this.ecReconstructReadExecutor)) { + this.ecReconstructReadExecutor, + this.ozoneClientConfig)) { ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 4843c1c45e6..31977023f43 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -60,7 +61,8 @@ private static List createStreams( XceiverClientFactory xceiverClientFactory, boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { List partStreams = new ArrayList<>(); for (OmKeyLocationInfo omKeyLocationInfo : blockInfos) { if (LOG.isDebugEnabled()) { @@ -93,7 +95,8 @@ private static List createStreams( omKeyLocationInfo.getToken(), verifyChecksum, xceiverClientFactory, - retry); + retry, + config); partStreams.add(stream); } return partStreams; @@ -120,10 +123,11 @@ private static LengthInputStream getFromOmKeyInfo( boolean verifyChecksum, Function retryFunction, BlockInputStreamFactory blockStreamFactory, - List locationInfos) { + List locationInfos, + OzoneClientConfig config) { List streams = createStreams(keyInfo, locationInfos, xceiverClientFactory, verifyChecksum, retryFunction, - blockStreamFactory); + blockStreamFactory, config); KeyInputStream keyInputStream = new KeyInputStream(keyInfo.getKeyName(), streams); return new LengthInputStream(keyInputStream, keyInputStream.getLength()); @@ -135,19 +139,21 @@ private static LengthInputStream getFromOmKeyInfo( public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + 
OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); return getFromOmKeyInfo(keyInfo, xceiverClientFactory, verifyChecksum, - retryFunction, blockStreamFactory, keyLocationInfos); + retryFunction, blockStreamFactory, keyLocationInfos, config); } public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); @@ -162,7 +168,8 @@ public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, // Create a KeyInputStream for each part. for (List locationInfo : partsToBlocksMap.values()) { lengthInputStreams.add(getFromOmKeyInfo(keyInfo, xceiverClientFactory, - verifyChecksum, retryFunction, blockStreamFactory, locationInfo)); + verifyChecksum, retryFunction, blockStreamFactory, locationInfo, + config)); } return lengthInputStreams; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 7b4bafa30da..3b6469c8c5c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2259,14 +2259,12 @@ private OzoneInputStream createInputStream( // When Key is not MPU or when Key is MPU and encryption is not enabled // Need to revisit for GDP. 
FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo(); - // Set read retry policy for the streams - KeyInputStream.setRetryPolicy(clientConfig); if (feInfo == null) { LengthInputStream lengthInputStream = KeyInputStream .getFromOmKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + blockInputStreamFactory, clientConfig); try { final GDPRSymmetricKey gk = getGDPRSymmetricKey( keyInfo.getMetadata(), Cipher.DECRYPT_MODE); @@ -2283,7 +2281,7 @@ private OzoneInputStream createInputStream( LengthInputStream lengthInputStream = KeyInputStream .getFromOmKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); final CryptoInputStream cryptoIn = new CryptoInputStream(lengthInputStream.getWrappedStream(), @@ -2295,7 +2293,7 @@ private OzoneInputStream createInputStream( List lengthInputStreams = KeyInputStream .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); List cryptoInputStreams = new ArrayList<>(); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java index 6af5c4b4e0d..3651ae2401d 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java @@ -20,8 +20,10 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -49,6 +51,8 @@ */ public class TestKeyInputStreamEC { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testReadAgainstLargeBlockGroup() throws IOException { int dataBlocks = 10; @@ -68,10 +72,11 @@ public void testReadAgainstLargeBlockGroup() throws IOException { BlockInputStreamFactory mockStreamFactory = mock(BlockInputStreamFactory.class); when(mockStreamFactory.create(any(), any(), any(), any(), - anyBoolean(), any(), any())).thenReturn(blockInputStream); + anyBoolean(), any(), any(), any())).thenReturn(blockInputStream); try (LengthInputStream kis = KeyInputStream.getFromOmKeyInfo(keyInfo, - null, true, null, mockStreamFactory)) { + null, true, null, mockStreamFactory, + conf.getObject(OzoneClientConfig.class))) { byte[] buf = new byte[100]; int readBytes = kis.read(buf, 0, 100); assertEquals(100, readBytes); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 59dac617664..b699ef6b9bc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -139,7 +139,6 @@ public static void init() throws Exception { startCluster(); client = cluster.newClient(); createTestData(); - KeyInputStream.setRetryPolicy(conf.getObject(OzoneClientConfig.class)); } private static void createTestData() throws IOException { @@ -299,7 +298,8 @@ private void readData(OmKeyInfo keyInfo, ((RpcClient) 
client.getProxy()).getXceiverClientManager(); try (InputStream is = KeyInputStream.getFromOmKeyInfo(keyInfo, xceiverClientManager, - false, retryFunc, blockInputStreamFactory)) { + false, retryFunc, blockInputStreamFactory, + conf.getObject(OzoneClientConfig.class))) { byte[] buf = new byte[100]; int readBytes = is.read(buf, 0, 100); assertEquals(100, readBytes); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index dc425926ac6..d780d4d6f41 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.ozone.client.io.KeyInputStream; import jakarta.annotation.Nonnull; @@ -34,6 +36,8 @@ */ public class TestChunkStreams { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testReadGroupInputStream() throws Exception { String dataString = RandomStringUtils.randomAscii(500); @@ -90,7 +94,8 @@ private List createInputStreams(String dataString) { } private BlockInputStream createStream(byte[] buf, int offset) { - return new BlockInputStream(null, 100, null, null, true, null) { + return new BlockInputStream(null, 100, null, null, true, null, + conf.getObject(OzoneClientConfig.class)) { private long pos; private final ByteArrayInputStream in = new ByteArrayInputStream(buf, offset, 100); From 5e0cab3eddc7e0acadfb81f4e0df8fcb231d6527 Mon Sep 17 00:00:00 2001 From: saketa Date: Tue, 19 Mar 2024 13:33:52 -0700 Subject: [PATCH 13/15] HDDS-5865. Fixed checkstyle errors. 
--- .../org/apache/hadoop/hdds/scm/storage/BlockInputStream.java | 1 + .../apache/hadoop/ozone/client/io/BlockInputStreamFactory.java | 1 + .../hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java | 1 + .../hadoop/ozone/client/io/ECBlockInputStreamFactory.java | 1 + .../hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java | 1 + .../org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java | 1 + .../apache/hadoop/ozone/client/io/TestECBlockInputStream.java | 1 + .../client/io/TestECBlockReconstructedStripeInputStream.java | 2 +- 8 files changed, 8 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 67ed04cb66a..83ea332ca4d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -112,6 +112,7 @@ public class BlockInputStream extends BlockExtendedInputStream { private final Function refreshFunction; + @SuppressWarnings("checkstyle:ParameterNumber") public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, boolean verifyChecksum, XceiverClientFactory xceiverClientFactory, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index bf8a6e3a2d1..b58dc3886a7 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -47,6 +47,7 @@ public interface BlockInputStreamFactory { * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. 
*/ + @SuppressWarnings("checkstyle:ParameterNumber") BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, boolean verifyChecksum, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index 430f577208f..020dc8f7322 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -75,6 +75,7 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. */ + @SuppressWarnings("checkstyle:ParameterNumber") public BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, boolean verifyChecksum, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 382104441d8..538f0cb9fcf 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -50,6 +50,7 @@ public interface ECBlockInputStreamFactory { * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. 
*/ + @SuppressWarnings("checkstyle:ParameterNumber") BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 4969d5a2b32..ee854dd78c6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -73,6 +73,7 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. */ + @SuppressWarnings("checkstyle:ParameterNumber") public BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java index 28a8401994a..28fb38c0da6 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java @@ -256,6 +256,7 @@ public synchronized void setFailIndexes(Integer... 
fail) { failIndexes.addAll(Arrays.asList(fail)); } + @SuppressWarnings("checkstyle:ParameterNumber") public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java index 356f9e27b6b..b7bc588c9d9 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java @@ -535,6 +535,7 @@ public synchronized List getBlockStreams() { return blockStreams; } + @SuppressWarnings("checkstyle:ParameterNumber") public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index a1fc26ce6e4..53023e7826e 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -76,7 +76,7 @@ public class TestECBlockReconstructedStripeInputStream { private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); private OzoneConfiguration conf = new OzoneConfiguration(); -; + static List> recoveryCases() { // TODO better name List> params = new ArrayList<>(); params.add(emptySet()); // non-recovery From c2fa09c8b3dc0c4e3af59df4f3f9a4e1460e1424 Mon Sep 17 00:00:00 2001 From: saketa Date: Wed, 20 Mar 2024 18:39:54 -0700 Subject: [PATCH 14/15] HDDS-5865. 
Getting verifyChecksum from client configuration in BlockInputStream.java --- .../hadoop/hdds/scm/OzoneClientConfig.java | 2 +- .../hdds/scm/storage/BlockInputStream.java | 8 +- .../hdds/scm/storage/ExtendedInputStream.java | 1 - .../client/io/BlockInputStreamFactory.java | 3 +- .../io/BlockInputStreamFactoryImpl.java | 7 +- .../ozone/client/io/ECBlockInputStream.java | 6 +- .../client/io/ECBlockInputStreamFactory.java | 3 +- .../io/ECBlockInputStreamFactoryImpl.java | 7 +- .../client/io/ECBlockInputStreamProxy.java | 6 +- ...ECBlockReconstructedStripeInputStream.java | 4 +- .../scm/storage/DummyBlockInputStream.java | 3 +- .../DummyBlockInputStreamWithRetry.java | 3 +- .../scm/storage/TestBlockInputStream.java | 24 ++-- .../ozone/client/io/ECStreamTestUtil.java | 3 +- .../io/TestBlockInputStreamFactoryImpl.java | 12 +- .../client/io/TestECBlockInputStream.java | 127 +++++++++++------- .../io/TestECBlockInputStreamProxy.java | 8 +- .../TestECBlockReconstructedInputStream.java | 6 +- ...ECBlockReconstructedStripeInputStream.java | 6 +- .../ECReconstructionCoordinator.java | 6 +- .../ozone/client/io/KeyInputStream.java | 13 +- .../hadoop/ozone/client/rpc/RpcClient.java | 9 +- .../ozone/client/io/TestKeyInputStreamEC.java | 9 +- .../apache/hadoop/ozone/TestBlockTokens.java | 7 +- .../hadoop/ozone/om/TestChunkStreams.java | 6 +- 25 files changed, 163 insertions(+), 126 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 32ba6081e4f..741ae5dd423 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -144,7 +144,7 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private int retryInterval = 0; - @Config(key = "max.read.retries", + @Config(key = "read.max.retries", defaultValue = "3", 
description = "Maximum number of retries by Ozone Client on " + "encountering connectivity exception when reading a key.", diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 83ea332ca4d..b62415395df 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -112,9 +112,8 @@ public class BlockInputStream extends BlockExtendedInputStream { private final Function refreshFunction; - @SuppressWarnings("checkstyle:ParameterNumber") public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverClientFactory, Function refreshFunction, OzoneClientConfig config) { @@ -122,7 +121,7 @@ public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, this.length = blockLen; setPipeline(pipeline); tokenRef.set(token); - this.verifyChecksum = verifyChecksum; + this.verifyChecksum = config.isChecksumVerify(); this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; this.retryPolicy = @@ -132,11 +131,10 @@ public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverClientFactory, OzoneClientConfig config ) { - this(blockId, blockLen, pipeline, token, verifyChecksum, + this(blockId, blockLen, pipeline, token, xceiverClientFactory, null, config); } /** diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index 15ca88baf0f..3e78abbf485 100644 --- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -101,5 +101,4 @@ public boolean hasCapability(String capability) { return false; } } - } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index b58dc3886a7..6f8a744f762 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -47,10 +47,9 @@ public interface BlockInputStreamFactory { * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. */ - @SuppressWarnings("checkstyle:ParameterNumber") BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index 020dc8f7322..6bcdc3c4811 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -75,20 +75,19 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. 
*/ - @SuppressWarnings("checkstyle:ParameterNumber") public BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config) { if (repConfig.getReplicationType().equals(HddsProtos.ReplicationType.EC)) { return new ECBlockInputStreamProxy((ECReplicationConfig)repConfig, - blockInfo, verifyChecksum, xceiverFactory, refreshFunction, + blockInfo, xceiverFactory, refreshFunction, ecBlockStreamFactory, config); } else { return new BlockInputStream(blockInfo.getBlockID(), blockInfo.getLength(), - pipeline, token, verifyChecksum, xceiverFactory, refreshFunction, + pipeline, token, xceiverFactory, refreshFunction, config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index fc17a776c72..8dc07f129b9 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -61,7 +61,6 @@ public class ECBlockInputStream extends BlockExtendedInputStream { private final int ecChunkSize; private final long stripeSize; private final BlockInputStreamFactory streamFactory; - private final boolean verifyChecksum; private final XceiverClientFactory xceiverClientFactory; private final Function refreshFunction; private final BlockLocationInfo blockInfo; @@ -109,14 +108,13 @@ protected int availableDataLocations(int expectedLocations) { } public ECBlockInputStream(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, BlockInputStreamFactory streamFactory, OzoneClientConfig config) { 
this.repConfig = repConfig; this.ecChunkSize = repConfig.getEcChunkSize(); - this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.streamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; @@ -194,7 +192,7 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) { StandaloneReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE), blkInfo, pipeline, - blockInfo.getToken(), verifyChecksum, xceiverClientFactory, + blockInfo.getToken(), xceiverClientFactory, ecPipelineRefreshFunction(locationIndex + 1, refreshFunction), config); blockStreams[locationIndex] = stream; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 538f0cb9fcf..66e7a31337a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -50,10 +50,9 @@ public interface ECBlockInputStreamFactory { * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. 
*/ - @SuppressWarnings("checkstyle:ParameterNumber") BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index ee854dd78c6..01d0b0a7b7e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -73,10 +73,9 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. 
*/ - @SuppressWarnings("checkstyle:ParameterNumber") public BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config) { @@ -84,7 +83,7 @@ public BlockExtendedInputStream create(boolean missingLocations, // We create the reconstruction reader ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( - (ECReplicationConfig)repConfig, blockInfo, verifyChecksum, + (ECReplicationConfig)repConfig, blockInfo, xceiverFactory, refreshFunction, inputStreamFactory, byteBufferPool, ecReconstructExecutorSupplier.get(), config); if (failedLocations != null) { @@ -95,7 +94,7 @@ public BlockExtendedInputStream create(boolean missingLocations, } else { // Otherwise create the more efficient non-reconstruction reader return new ECBlockInputStream((ECReplicationConfig)repConfig, blockInfo, - verifyChecksum, xceiverFactory, refreshFunction, inputStreamFactory, + xceiverFactory, refreshFunction, inputStreamFactory, config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java index 517ec249fcd..68a0337cef1 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java @@ -50,7 +50,6 @@ public class ECBlockInputStreamProxy extends BlockExtendedInputStream { LoggerFactory.getLogger(ECBlockInputStreamProxy.class); private final ECReplicationConfig repConfig; - private final boolean verifyChecksum; private final XceiverClientFactory xceiverClientFactory; private final Function refreshFunction; private final BlockLocationInfo blockInfo; @@ 
-99,13 +98,12 @@ public static int availableDataLocations(Pipeline pipeline, } public ECBlockInputStreamProxy(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, ECBlockInputStreamFactory streamFactory, OzoneClientConfig config) { this.repConfig = repConfig; - this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.ecBlockInputStreamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; @@ -128,7 +126,7 @@ private void createBlockReader() { .incECReconstructionTotal(); } blockReader = ecBlockInputStreamFactory.create(reconstructionReader, - failedLocations, repConfig, blockInfo, verifyChecksum, + failedLocations, repConfig, blockInfo, xceiverClientFactory, refreshFunction, config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 015df017be2..31f94e0acad 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -153,14 +153,14 @@ public class ECBlockReconstructedStripeInputStream extends ECBlockInputStream { @SuppressWarnings("checkstyle:ParameterNumber") public ECBlockReconstructedStripeInputStream(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, BlockInputStreamFactory streamFactory, ByteBufferPool byteBufferPool, ExecutorService ecReconstructExecutor, OzoneClientConfig config) { - super(repConfig, blockInfo, verifyChecksum, xceiverClientFactory, + super(repConfig, blockInfo, 
xceiverClientFactory, refreshFunction, streamFactory, config); this.byteBufferPool = byteBufferPool; this.executor = ecReconstructExecutor; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java index bf46bf97560..a89097533d2 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java @@ -45,13 +45,12 @@ class DummyBlockInputStream extends BlockInputStream { long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverClientManager, Function refreshFunction, List chunkList, Map chunks, OzoneClientConfig config) { - super(blockId, blockLen, pipeline, token, verifyChecksum, + super(blockId, blockLen, pipeline, token, xceiverClientManager, refreshFunction, config); this.chunkDataMap = chunks; this.chunks = chunkList; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java index 7c968c2ffff..6d12614228f 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java @@ -52,13 +52,12 @@ final class DummyBlockInputStreamWithRetry long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverClientManager, List chunkList, Map chunkMap, AtomicBoolean isRerfreshed, IOException ioException, OzoneClientConfig config) { - super(blockId, blockLen, pipeline, token, verifyChecksum, + super(blockId, blockLen, pipeline, token, xceiverClientManager, blockID -> { isRerfreshed.set(true); 
try { diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 87f144d59a3..21b088ce85f 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -96,11 +96,12 @@ public void setup() throws Exception { BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); createChunkList(5); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); Pipeline pipeline = MockPipeline.createSingleNodePipeline(); blockStream = new DummyBlockInputStream(blockID, blockSize, pipeline, null, - false, null, refreshFunction, chunks, chunkDataMap, - conf.getObject(OzoneClientConfig.class)); + null, refreshFunction, chunks, chunkDataMap, clientConfig); } /** @@ -264,12 +265,14 @@ public void testRefreshPipelineFunction() throws Exception { BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); AtomicBoolean isRefreshed = new AtomicBoolean(); createChunkList(5); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); try (BlockInputStream blockInputStreamWithRetry = new DummyBlockInputStreamWithRetry(blockID, blockSize, MockPipeline.createSingleNodePipeline(), null, - false, null, chunks, chunkDataMap, isRefreshed, null, - conf.getObject(OzoneClientConfig.class))) { + null, chunks, chunkDataMap, isRefreshed, null, + clientConfig)) { assertFalse(isRefreshed.get()); seekAndVerify(50); byte[] b = new byte[200]; @@ -353,9 +356,10 @@ private static ChunkInputStream throwingChunkInputStream(IOException ex, private BlockInputStream createSubject(BlockID blockID, Pipeline pipeline, ChunkInputStream stream) { - return new 
DummyBlockInputStream(blockID, blockSize, pipeline, null, false, - null, refreshFunction, chunks, null, - conf.getObject(OzoneClientConfig.class)) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); + return new DummyBlockInputStream(blockID, blockSize, pipeline, null, + null, refreshFunction, chunks, null, clientConfig) { @Override protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; @@ -407,9 +411,11 @@ public void testRefreshOnReadFailureAfterUnbuffer(IOException ex) .thenReturn(blockLocationInfo); when(blockLocationInfo.getPipeline()).thenReturn(newPipeline); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); BlockInputStream subject = new BlockInputStream(blockID, blockSize, - pipeline, null, false, clientFactory, refreshFunction, - conf.getObject(OzoneClientConfig.class)) { + pipeline, null, clientFactory, refreshFunction, + clientConfig) { @Override protected List getChunkInfoListUsingClient() { return chunks; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java index 28fb38c0da6..049037bc4dc 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java @@ -256,11 +256,10 @@ public synchronized void setFailIndexes(Integer... 
fail) { failIndexes.addAll(Arrays.asList(fail)); } - @SuppressWarnings("checkstyle:ParameterNumber") public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config) { diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java index 6d35cea1d7a..623f7a4f86f 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java @@ -56,10 +56,12 @@ public void testNonECGivesBlockInputStream() { BlockLocationInfo blockInfo = createKeyLocationInfo(repConfig, 3, 1024 * 1024 * 10); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null, - conf.getObject(OzoneClientConfig.class)); + blockInfo.getToken(), null, null, + clientConfig); assertInstanceOf(BlockInputStream.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); @@ -74,10 +76,12 @@ public void testECGivesECBlockInputStream() { BlockLocationInfo blockInfo = createKeyLocationInfo(repConfig, 5, 1024 * 1024 * 10); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null, - conf.getObject(OzoneClientConfig.class)); + blockInfo.getToken(), 
null, null, + clientConfig); assertInstanceOf(ECBlockInputStreamProxy.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java index b7bc588c9d9..60974b35a95 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java @@ -75,17 +75,19 @@ public void testSufficientLocations() { // EC-3-2, 5MB block, so all 3 data locations are needed BlockLocationInfo keyInfo = ECStreamTestUtil .createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory(), - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } // EC-3-2, very large block, so all 3 data locations are needed keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5000 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory(), - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } @@ -95,8 +97,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory(), dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB - 1, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new 
TestBlockInputStreamFactory(), - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } @@ -106,8 +108,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory(), dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory(), - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertFalse(ecb.hasSufficientLocations()); } @@ -119,8 +121,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory(), dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 5); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory(), - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertFalse(ecb.hasSufficientLocations()); } } @@ -132,9 +134,11 @@ public void testCorrectBlockSizePassedToBlockStreamLessThanCell() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB - 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); // We expect only 1 block stream and it should have a length passed of // ONEMB - 100. 
@@ -150,9 +154,11 @@ public void testCorrectBlockSizePassedToBlockStreamTwoCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -167,9 +173,11 @@ public void testCorrectBlockSizePassedToBlockStreamThreeCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 2 * ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -185,9 +193,11 @@ public void testCorrectBlockSizePassedToBlockStreamThreeFullAndPartialStripe() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 10 * ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(4 * ONEMB, streams.get(0).getLength()); @@ -203,9 +213,11 @@ public void testCorrectBlockSizePassedToBlockStreamSingleFullCell() BlockLocationInfo keyInfo = 
ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -219,9 +231,11 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 9 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(3 * ONEMB, streams.get(0).getLength()); @@ -234,9 +248,11 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() public void testSimpleRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -258,9 +274,11 @@ public void testSimpleRead() throws IOException { public void testSimpleReadUnderOneChunk() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1, ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + 
clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -278,9 +296,11 @@ public void testSimpleReadUnderOneChunk() throws IOException { public void testReadPastEOF() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 50); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -298,9 +318,11 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { 100); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { // EC Chunk size is 100 and 3-2. 
Create a byte buffer to read 3.5 chunks, // so 350 @@ -334,9 +356,11 @@ public void testSeekPastBlockLength() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { assertThrows(EOFException.class, () -> ecb.seek(1000)); } } @@ -347,9 +371,11 @@ public void testSeekToLength() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { // When seek more than the length, should throw EOFException. 
assertThrows(EOFException.class, () -> ecb.seek(101)); } @@ -361,9 +387,11 @@ public void testSeekToLengthZeroLengthBlock() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 0); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.seek(0); assertEquals(0, ecb.getPos()); assertEquals(0, ecb.getRemaining()); @@ -376,9 +404,11 @@ public void testSeekToValidPosition() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.seek(ONEMB - 1); assertEquals(ONEMB - 1, ecb.getPos()); assertEquals(ONEMB * 4 + 1, ecb.getRemaining()); @@ -406,9 +436,11 @@ public void testErrorReadingBlockReportsBadLocation() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -438,9 +470,11 @@ public void testNoErrorIfSpareLocationToRead() throws IOException { BlockLocationInfo 
keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 8 * ONEMB, datanodes); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -503,9 +537,11 @@ public void testEcPipelineRefreshFunction() { return blockLocation; }; + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class))) { + keyInfo, null, null, streamFactory, + clientConfig)) { Pipeline pipeline = ecb.ecPipelineRefreshFunction(3, refreshFunction) .apply(blockID) @@ -535,11 +571,10 @@ public synchronized List getBlockStreams() { return blockStreams; } - @SuppressWarnings("checkstyle:ParameterNumber") public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverFactory, + XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config) { TestBlockInputStream stream = new TestBlockInputStream( diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java index 24e4272538e..ca0b9710a96 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java @@ 
-345,9 +345,11 @@ private void resetAndAdvanceDataGenerator(long position) { private ECBlockInputStreamProxy createBISProxy(ECReplicationConfig rConfig, BlockLocationInfo blockInfo) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); return new ECBlockInputStreamProxy( - rConfig, blockInfo, true, null, null, streamFactory, - conf.getObject(OzoneClientConfig.class)); + rConfig, blockInfo, null, null, streamFactory, + clientConfig); } private static class TestECBlockInputStreamFactory @@ -376,7 +378,7 @@ public List getFailedLocations() { public BlockExtendedInputStream create(boolean missingLocations, List failedDatanodes, ReplicationConfig repConfig, BlockLocationInfo blockInfo, - boolean verifyChecksum, XceiverClientFactory xceiverFactory, + XceiverClientFactory xceiverFactory, Function refreshFunction, OzoneClientConfig config) { this.failedLocations = failedDatanodes; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java index 7e67b26faec..6b60bef66af 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java @@ -77,9 +77,11 @@ private ECBlockReconstructedStripeInputStream createStripeInputStream( BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); streamFactory.setCurrentPipeline(keyInfo.getPipeline()); - return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, null, null, streamFactory, bufferPool, 
ecReconstructExecutor, - conf.getObject(OzoneClientConfig.class)); + clientConfig); } @Test diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index 53023e7826e..e526b12a514 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -811,9 +811,11 @@ public void testFailedLocationsAreNotRead() throws IOException { private ECBlockReconstructedStripeInputStream createInputStream( BlockLocationInfo keyInfo) { - return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, null, null, streamFactory, bufferPool, ecReconstructExecutor, - conf.getObject(OzoneClientConfig.class)); + clientConfig); } private void addDataStreamsToFactory(ByteBuffer[] data, ByteBuffer[] parity) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index bca64996340..dccc271f6de 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -267,13 +267,15 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, return; } + OzoneClientConfig clientConfig = 
this.ozoneClientConfig; + clientConfig.setChecksumVerify(true); try (ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( - repConfig, blockLocationInfo, true, + repConfig, blockLocationInfo, this.containerOperationClient.getXceiverClientManager(), null, this.blockInputStreamFactory, byteBufferPool, this.ecReconstructReadExecutor, - this.ozoneClientConfig)) { + clientConfig)) { ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 31977023f43..2d40841ee49 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -59,7 +59,6 @@ private static List createStreams( OmKeyInfo keyInfo, List blockInfos, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, BlockInputStreamFactory blockStreamFactory, OzoneClientConfig config) { @@ -93,7 +92,6 @@ private static List createStreams( omKeyLocationInfo, omKeyLocationInfo.getPipeline(), omKeyLocationInfo.getToken(), - verifyChecksum, xceiverClientFactory, retry, config); @@ -120,13 +118,12 @@ private static BlockLocationInfo getBlockLocationInfo(OmKeyInfo newKeyInfo, private static LengthInputStream getFromOmKeyInfo( OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, BlockInputStreamFactory blockStreamFactory, List locationInfos, OzoneClientConfig config) { List streams = createStreams(keyInfo, - locationInfos, xceiverClientFactory, verifyChecksum, retryFunction, + locationInfos, xceiverClientFactory, retryFunction, blockStreamFactory, config); KeyInputStream keyInputStream = new KeyInputStream(keyInfo.getKeyName(), 
streams); @@ -138,19 +135,19 @@ private static LengthInputStream getFromOmKeyInfo( */ public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, + Function retryFunction, BlockInputStreamFactory blockStreamFactory, OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); - return getFromOmKeyInfo(keyInfo, xceiverClientFactory, verifyChecksum, + return getFromOmKeyInfo(keyInfo, xceiverClientFactory, retryFunction, blockStreamFactory, keyLocationInfos, config); } public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, - XceiverClientFactory xceiverClientFactory, boolean verifyChecksum, + XceiverClientFactory xceiverClientFactory, Function retryFunction, BlockInputStreamFactory blockStreamFactory, OzoneClientConfig config) { @@ -168,7 +165,7 @@ public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, // Create a KeyInputStream for each part. 
for (List locationInfo : partsToBlocksMap.values()) { lengthInputStreams.add(getFromOmKeyInfo(keyInfo, xceiverClientFactory, - verifyChecksum, retryFunction, blockStreamFactory, locationInfo, + retryFunction, blockStreamFactory, locationInfo, config)); } return lengthInputStreams; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index c1ac9e213e1..3a4d391b006 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2257,8 +2257,7 @@ private OzoneInputStream createInputStream( if (feInfo == null) { LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, + .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction, blockInputStreamFactory, clientConfig); try { final GDPRSymmetricKey gk = getGDPRSymmetricKey( @@ -2274,8 +2273,7 @@ private OzoneInputStream createInputStream( } else if (!keyInfo.getLatestVersionLocations().isMultipartKey()) { // Regular Key with FileEncryptionInfo LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, + .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction, blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); final CryptoInputStream cryptoIn = @@ -2286,8 +2284,7 @@ private OzoneInputStream createInputStream( } else { // Multipart Key with FileEncryptionInfo List lengthInputStreams = KeyInputStream - .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, + .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, retryFunction, blockInputStreamFactory, clientConfig); final 
KeyProvider.KeyVersion decrypted = getDEK(feInfo); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java index 3651ae2401d..4d4a1ab4cba 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java @@ -41,7 +41,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.MB; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -72,11 +71,13 @@ public void testReadAgainstLargeBlockGroup() throws IOException { BlockInputStreamFactory mockStreamFactory = mock(BlockInputStreamFactory.class); when(mockStreamFactory.create(any(), any(), any(), any(), - anyBoolean(), any(), any(), any())).thenReturn(blockInputStream); + any(), any(), any())).thenReturn(blockInputStream); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (LengthInputStream kis = KeyInputStream.getFromOmKeyInfo(keyInfo, - null, true, null, mockStreamFactory, - conf.getObject(OzoneClientConfig.class))) { + null, null, mockStreamFactory, + clientConfig)) { byte[] buf = new byte[100]; int readBytes = kis.read(buf, 0, 100); assertEquals(100, readBytes); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 1ad7af9ee0d..26c1868084f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -296,10 +296,11 
@@ private void readData(OmKeyInfo keyInfo, Function retryFunc) throws IOException { XceiverClientFactory xceiverClientManager = ((RpcClient) client.getProxy()).getXceiverClientManager(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); try (InputStream is = KeyInputStream.getFromOmKeyInfo(keyInfo, - xceiverClientManager, - false, retryFunc, blockInputStreamFactory, - conf.getObject(OzoneClientConfig.class))) { + xceiverClientManager, retryFunc, blockInputStreamFactory, + clientConfig)) { byte[] buf = new byte[100]; int readBytes = is.read(buf, 0, 100); assertEquals(100, readBytes); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index d780d4d6f41..e0d5ef4084d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -94,8 +94,10 @@ private List createInputStreams(String dataString) { } private BlockInputStream createStream(byte[] buf, int offset) { - return new BlockInputStream(null, 100, null, null, true, null, - conf.getObject(OzoneClientConfig.class)) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new BlockInputStream(null, 100, null, null, null, + clientConfig) { private long pos; private final ByteArrayInputStream in = new ByteArrayInputStream(buf, offset, 100); From 1932c79e82a72ed6b34259724280cdcc85650f34 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 21 Mar 2024 08:16:50 +0100 Subject: [PATCH 15/15] Fix typo in description --- .../main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 741ae5dd423..549735438a0 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -157,7 +157,7 @@ public enum ChecksumCombineMode { "Indicates the time duration in seconds a client will wait " + "before retrying a read key request on encountering " + "a connectivity excepetion from Datanodes . " - + "By default the interval is 1millisecond", + + "By default the interval is 1 second", tags = ConfigTag.CLIENT) private int readRetryInterval = 1;