diff --git a/docs/changelog/116739.yaml b/docs/changelog/116739.yaml new file mode 100644 index 0000000000000..ea3b1253a9008 --- /dev/null +++ b/docs/changelog/116739.yaml @@ -0,0 +1,5 @@ +pr: 116739 +summary: Change default Docker image to be based on UBI minimal instead of Ubuntu +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/reference/connector/docs/connectors-content-syncs.asciidoc b/docs/reference/connector/docs/connectors-content-syncs.asciidoc index f1745382677a2..0a2eb54047170 100644 --- a/docs/reference/connector/docs/connectors-content-syncs.asciidoc +++ b/docs/reference/connector/docs/connectors-content-syncs.asciidoc @@ -52,7 +52,7 @@ However, a fast, accessible third-party data source that stores huge amounts of [NOTE] ==== -Incremental syncs for the SharePoint Online connector use specific logic. +Incremental syncs for <> and <> connectors use specific logic. All other connectors use the same shared connector framework logic for incremental syncs. ==== diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index 2908c55789bab..e0100b1c5640b 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -85,7 +85,7 @@ You can deploy {es} in various ways. **Hosted options** * {cloud}/ec-getting-started-trial.html[*Elastic Cloud Hosted*]: {es} is available as part of the hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14-day free trial]. -* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial]. +* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless*]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial]. 
**Advanced options** diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 824f009bc7d8e..999f790ee8117 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -33,7 +33,7 @@ public class InternalAdjacencyMatrix extends InternalMultiBucketAggregation implements AdjacencyMatrix { - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements AdjacencyMatrix.Bucket { + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements AdjacencyMatrix.Bucket { private final String key; private final long docCount; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index d7590f2126325..c4669b1c25224 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -34,7 +34,7 @@ public class InternalTimeSeries extends InternalMultiBucketAggregation repositoryAccessKey) + ); private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); @@ -68,7 +75,7 @@ public void testReloadCredentialsFromKeystore() throws IOException { // Set up initial credentials final var accessKey1 = randomIdentifier(); - s3Fixture.setAccessKey(accessKey1); + repositoryAccessKey = accessKey1; keystoreSettings.put("s3.client.default.access_key", accessKey1); keystoreSettings.put("s3.client.default.secret_key", randomIdentifier()); cluster.updateStoredSecureSettings(); @@ -79,14 +86,14 @@ public void testReloadCredentialsFromKeystore() throws IOException { // Rotate credentials in blob store final var accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); - s3Fixture.setAccessKey(accessKey2); + repositoryAccessKey = accessKey2; // Ensure that initial credentials now invalid final var accessDeniedException2 = expectThrows(ResponseException.class, () -> client().performRequest(verifyRequest)); assertThat(accessDeniedException2.getResponse().getStatusLine().getStatusCode(), equalTo(500)); assertThat( accessDeniedException2.getMessage(), - allOf(containsString("Bad access key"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied")) + allOf(containsString("Access denied"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied")) ); // Set up refreshed credentials diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 64cb3c3fd3a69..a3b154b4bdfed 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -10,8 +10,8 @@ package 
org.elasticsearch.repositories.s3; import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; -import fixture.s3.S3HttpFixtureWithSessionToken; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -34,27 +34,30 @@ public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3Clien private static final String HASHED_SEED = Integer.toString(Murmur3HashFunction.hash(System.getProperty("tests.seed"))); private static final String TEMPORARY_SESSION_TOKEN = "session_token-" + HASHED_SEED; - private static final String IMDS_ACCESS_KEY = "imds-access-key-" + HASHED_SEED; - private static final String IMDS_SESSION_TOKEN = "imds-session-token-" + HASHED_SEED; private static final S3HttpFixture s3Fixture = new S3HttpFixture(); - private static final S3HttpFixtureWithSessionToken s3HttpFixtureWithSessionToken = new S3HttpFixtureWithSessionToken( + private static final S3HttpFixture s3HttpFixtureWithSessionToken = new S3HttpFixture( + true, "session_token_bucket", "session_token_base_path_integration_tests", - System.getProperty("s3TemporaryAccessKey"), - TEMPORARY_SESSION_TOKEN + S3HttpFixture.fixedAccessKeyAndToken(System.getProperty("s3TemporaryAccessKey"), TEMPORARY_SESSION_TOKEN) ); - private static final S3HttpFixtureWithSessionToken s3HttpFixtureWithImdsSessionToken = new S3HttpFixtureWithSessionToken( + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + + private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( + dynamicS3Credentials::addValidCredentials, + Set.of() + ); + + private static final S3HttpFixture s3HttpFixtureWithImdsSessionToken = new S3HttpFixture( + true, "ec2_bucket", "ec2_base_path", - IMDS_ACCESS_KEY, - IMDS_SESSION_TOKEN + dynamicS3Credentials::isAuthorized ); - private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture(IMDS_ACCESS_KEY, IMDS_SESSION_TOKEN, Set.of()); - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java index a522c9b17145b..bbd003f506ead 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java @@ -10,12 +10,12 @@ package org.elasticsearch.repositories.s3; import fixture.aws.imds.Ec2ImdsHttpFixture; -import fixture.s3.S3HttpFixtureWithSessionToken; +import fixture.s3.DynamicS3Credentials; +import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.junit.ClassRule; @@ -26,23 +26,20 @@ public class RepositoryS3EcsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { - private static final String 
HASHED_SEED = Integer.toString(Murmur3HashFunction.hash(System.getProperty("tests.seed"))); - private static final String ECS_ACCESS_KEY = "ecs-access-key-" + HASHED_SEED; - private static final String ECS_SESSION_TOKEN = "ecs-session-token-" + HASHED_SEED; - - private static final S3HttpFixtureWithSessionToken s3Fixture = new S3HttpFixtureWithSessionToken( - "ecs_bucket", - "ecs_base_path", - ECS_ACCESS_KEY, - ECS_SESSION_TOKEN - ); + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - ECS_ACCESS_KEY, - ECS_SESSION_TOKEN, + dynamicS3Credentials::addValidCredentials, Set.of("/ecs_credentials_endpoint") ); + private static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + "ecs_bucket", + "ecs_base_path", + dynamicS3Credentials::isAuthorized + ); + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") .setting("s3.client.integration_test_ecs.endpoint", s3Fixture::getAddress) diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java index 24f03a6ae7624..7c4d719485113 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java @@ -9,8 +9,9 @@ package org.elasticsearch.repositories.s3; +import fixture.aws.sts.AwsStsHttpFixture; +import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; -import fixture.s3.S3HttpFixtureWithSTS; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -24,13 +25,27 @@ public class RepositoryS3StsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { - public static final S3HttpFixture s3Fixture = new S3HttpFixture(); - private static final S3HttpFixtureWithSTS s3Sts = new S3HttpFixtureWithSTS(); + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + + private static final S3HttpFixture s3HttpFixture = new S3HttpFixture( + true, + "sts_bucket", + "sts_base_path", + dynamicS3Credentials::isAuthorized + ); + + private static final AwsStsHttpFixture stsHttpFixture = new AwsStsHttpFixture(dynamicS3Credentials::addValidCredentials, """ + Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDans\ + FBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFO\ + zTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ"""); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") - .setting("s3.client.integration_test_sts.endpoint", s3Sts::getAddress) - .systemProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", () -> s3Sts.getAddress() + "/assume-role-with-web-identity") + .setting("s3.client.integration_test_sts.endpoint", s3HttpFixture::getAddress) + .systemProperty( + "com.amazonaws.sdk.stsMetadataServiceEndpointOverride", + () -> stsHttpFixture.getAddress() + "/assume-role-with-web-identity" + ) 
.configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) // // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the @@ -40,7 +55,7 @@ public class RepositoryS3StsClientYamlTestSuiteIT extends AbstractRepositoryS3Cl .build(); @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Sts).around(cluster); + public static TestRule ruleChain = RuleChain.outerRule(s3HttpFixture).around(stsHttpFixture).around(cluster); @ParametersFactory public static Iterable parameters() throws Exception { diff --git a/muted-tests.yml b/muted-tests.yml index 37f36e9a19340..b3c34505e6561 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -240,6 +240,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/117524 - class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/117525 +- class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT + method: testConstantKeywordField + issue: https://github.com/elastic/elasticsearch/issues/117531 # Examples: # diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 8c80cee58f46c..76a6717ab1d09 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matcher; @@ -31,6 +32,7 @@ import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.test.MockLog.assertThatLogger; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.equalTo; @@ -211,4 +213,25 @@ public void testLogLocalHotThreads() { ) ); } + + @TestLogging(reason = "testing logging at various levels", value = "org.elasticsearch.action.admin.HotThreadsIT:TRACE") + public void testLogLocalCurrentThreadsInPlainText() { + final var level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); + assertThatLogger( + () -> HotThreads.logLocalCurrentThreads(logger, level, getTestName()), + HotThreadsIT.class, + new MockLog.SeenEventExpectation( + "Should log hot threads header in plain text", + HotThreadsIT.class.getCanonicalName(), + level, + "testLogLocalCurrentThreadsInPlainText: Hot threads at" + ), + new MockLog.SeenEventExpectation( + "Should log hot threads cpu usage in plain text", + HotThreadsIT.class.getCanonicalName(), + level, + "cpu usage by thread" + ) + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 
90326abb381d0..85f0e2cf7e3ff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -398,7 +398,7 @@ public void testErrorCanRecoverOnRestart() throws Exception { FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); - assertTrue(masterFileSettingsService.watching()); + assertBusy(() -> assertTrue(masterFileSettingsService.watching())); assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); @@ -434,7 +434,7 @@ public void testNewErrorOnRestartReprocessing() throws Exception { FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); - assertTrue(masterFileSettingsService.watching()); + assertBusy(() -> assertTrue(masterFileSettingsService.watching())); assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 688d2aaf905a6..6567f48d6c232 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -206,7 +206,7 @@ static TransportVersion def(int id) { public static final TransportVersion INGEST_PIPELINE_CONFIGURATION_AS_MAP = def(8_797_00_0); public static final TransportVersion INDEXING_PRESSURE_THROTTLING_STATS = def(8_798_00_0); public static final TransportVersion REINDEX_DATA_STREAMS = def(8_799_00_0); - + public static final TransportVersion ESQL_REMOVE_NODE_LEVEL_PLAN = def(8_800_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 7a5f469a57fa1..6344aa2a72ca9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -131,6 +131,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, Version.LUCENE_9_12_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0); public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID_BACKPORT = def(8_520_00_0, Version.LUCENE_9_12_0); + public static final IndexVersion V8_DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 9d0dc9635537b..e7c7ec3535b91 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -484,8 +484,7 @@ public boolean isStored() { } public static boolean onOrAfterDeprecateModeVersion(IndexVersion version) { - return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER); - // Adjust versions after backporting. - // || version.between(IndexVersions.BACKPORT_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0); + return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER) + || version.between(IndexVersions.V8_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0); } } diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index b14ef171ccd1d..8c903fdc634d3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; +import java.io.StringWriter; import java.io.Writer; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; @@ -105,6 +106,33 @@ public static void logLocalHotThreads(Logger logger, Level level, String prefix, } } + /** + * Capture and log the current threads on the local node. Unlike hot threads, this does not sample; it captures the current state + * only. Useful for capturing stack traces for unexpectedly slow operations in production. The resulting message might be large, so + * it is split per thread and logged as multiple entries. + * + * @param logger The logger to use for the logging. + * @param level The log level to use for the logging. + * @param prefix The prefix to emit on each chunk of the logging.
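+ * <p>For example, a hypothetical call site: {@code HotThreads.logLocalCurrentThreads(logger, Level.DEBUG, "slow task")}.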
+ */ + public static void logLocalCurrentThreads(Logger logger, Level level, String prefix) { + if (logger.isEnabled(level) == false) { + return; + } + + try (var writer = new StringWriter()) { + new HotThreads().busiestThreads(500).threadElementsSnapshotCount(1).detect(writer, () -> { + logger.log(level, "{}: {}", prefix, writer.toString()); + writer.getBuffer().setLength(0); + }); + } catch (Exception e) { + logger.error( + () -> org.elasticsearch.common.Strings.format("failed to write local current threads with prefix [%s]", prefix), + e + ); + } + } + public enum ReportType { CPU("cpu"), @@ -192,11 +220,12 @@ public HotThreads sortOrder(SortOrder order) { } public void detect(Writer writer) throws Exception { + detect(writer, () -> {}); + } + + public void detect(Writer writer, Runnable onNextThread) throws Exception { synchronized (mutex) { - innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), (interval) -> { - Thread.sleep(interval); - return null; - }, writer); + innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), writer, onNextThread); } } @@ -245,13 +274,15 @@ Map getAllValidThreadInfos(ThreadMXBean threadBean, ThreadInfo[][] captureThreadStacks(ThreadMXBean threadBean, long[] threadIds) throws InterruptedException { ThreadInfo[][] result = new ThreadInfo[threadElementsSnapshotCount][]; - for (int j = 0; j < threadElementsSnapshotCount; j++) { - // NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist, - // null will be set in the corresponding element in the returned array. A thread is alive if it has - // been started and has not yet died. + + // NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist, + // null will be set in the corresponding element in the returned array. A thread is alive if it has + // been started and has not yet died. 
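+ // All but the last snapshot are followed by a delay; the final snapshot below is taken without a trailing sleep.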
+ for (int j = 0; j < threadElementsSnapshotCount - 1; j++) { result[j] = threadBean.getThreadInfo(threadIds, Integer.MAX_VALUE); Thread.sleep(threadElementsSnapshotDelay.millis()); } + result[threadElementsSnapshotCount - 1] = threadBean.getThreadInfo(threadIds, Integer.MAX_VALUE); return result; } @@ -267,13 +298,8 @@ private double getTimeSharePercentage(long time) { return (((double) time) / interval.nanos()) * 100; } - void innerDetect( - ThreadMXBean threadBean, - SunThreadInfo sunThreadInfo, - long currentThreadId, - SleepFunction threadSleep, - Writer writer - ) throws Exception { + void innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long currentThreadId, Writer writer, Runnable onNextThread) + throws Exception { if (threadBean.isThreadCpuTimeSupported() == false) { throw new ElasticsearchException("thread CPU time is not supported on this JDK"); } @@ -297,10 +323,11 @@ void innerDetect( .append(", ignoreIdleThreads=") .append(Boolean.toString(ignoreIdleThreads)) .append(":\n"); + onNextThread.run(); // Capture before and after thread state with timings Map previousThreadInfos = getAllValidThreadInfos(threadBean, sunThreadInfo, currentThreadId); - threadSleep.apply(interval.millis()); + Thread.sleep(interval.millis()); Map latestThreadInfos = getAllValidThreadInfos(threadBean, sunThreadInfo, currentThreadId); latestThreadInfos.forEach((threadId, accumulator) -> accumulator.subtractPrevious(previousThreadInfos.get(threadId))); @@ -430,6 +457,7 @@ void innerDetect( } } } + onNextThread.run(); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 5ba39c640abdc..ef21e4103fd88 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -220,7 +220,7 @@ private List reducePipelineBuckets(AggregationReduceContext reduceContext, Pi return reducedBuckets; } - public abstract static class InternalBucket implements Bucket, Writeable { + public abstract static class InternalBucket implements Bucket { public Object getProperty(String containingAggName, List path) { if (path.isEmpty()) { @@ -248,4 +248,8 @@ public Object getProperty(String containingAggName, List path) { return aggregation.getProperty(path.subList(1, path.size())); } } + + /** An {@link InternalBucket} that implements the {@link Writeable} interface. Most implementations will want + * to use this one, except when specific logic is needed to write into the stream.
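+ * (For example, the terms buckets in this change keep extending {@link InternalBucket}, because their wire format is driven by the parent aggregation through a custom {@code writeTo(StreamOutput, boolean)} method.)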
*/ + public abstract static class InternalBucketWritable extends InternalBucket implements Writeable {} } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index faa953e77edd8..1492e97e6a5a5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -320,7 +320,9 @@ public int hashCode() { return Objects.hash(super.hashCode(), size, buckets, afterKey, Arrays.hashCode(reverseMuls), Arrays.hashCode(missingOrders)); } - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements CompositeAggregation.Bucket { + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable + implements + CompositeAggregation.Bucket { private final CompositeKey key; private final long docCount; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java index c05759582346a..19cd0df9c7122 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java @@ -30,7 +30,7 @@ import java.util.Objects; public class InternalFilters extends InternalMultiBucketAggregation implements Filters { - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements Filters.Bucket { + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements Filters.Bucket { private final String key; private long docCount; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index 9e3c96da2e70b..60de4c3974c92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.util.Objects; -public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket +public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements GeoGrid.Bucket, Comparable { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java index 16a83ed04e524..7806d8cd8efe2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java @@ -16,7 +16,7 @@ /** * A bucket in the histogram where documents fall in */ -public abstract class AbstractHistogramBucket extends InternalMultiBucketAggregation.InternalBucket { +public abstract class AbstractHistogramBucket extends InternalMultiBucketAggregation.InternalBucketWritable { protected final long docCount; 
protected final InternalAggregations aggregations; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java index 5b456b3246b64..36a8fccc77e99 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java @@ -33,7 +33,7 @@ public class InternalIpPrefix extends InternalMultiBucketAggregation { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket + public static class Bucket extends InternalMultiBucketAggregation.InternalBucketWritable implements IpPrefix.Bucket, KeyComparable { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index 9571dfebc6069..34a2ebea88440 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -36,7 +36,7 @@ public final class InternalBinaryRange extends InternalMultiBucketAggregation buckets, AggregationReduceContext context) { @@ -104,7 +104,7 @@ private B reduceBucket(List buckets, AggregationReduceContext context) { for (B bucket : buckets) { docCount += bucket.getDocCount(); if (docCountError != -1) { - if (bucket.getShowDocCountError() == false || bucket.getDocCountError() == -1) { + if (getShowDocCountError() == false || bucket.getDocCountError() == -1) { docCountError = -1; } else { docCountError += bucket.getDocCountError(); @@ -257,6 +257,7 @@ public void accept(InternalAggregation aggregation) { } otherDocCount[0] += terms.getSumOfOtherDocCounts(); final long thisAggDocCountError = getDocCountError(terms); + setDocCountError(thisAggDocCountError); if (sumDocCountError != -1) { if (thisAggDocCountError == -1) { sumDocCountError = -1; @@ -264,16 +265,17 @@ public void accept(InternalAggregation aggregation) { sumDocCountError += thisAggDocCountError; } } - setDocCountError(thisAggDocCountError); - for (B bucket : terms.getBuckets()) { - // If there is already a doc count error for this bucket - // subtract this aggs doc count error from it to make the - // new value for the bucket. This then means that when the - // final error for the bucket is calculated below we account - // for the existing error calculated in a previous reduce. - // Note that if the error is unbounded (-1) this will be fixed - // later in this method. - bucket.updateDocCountError(-thisAggDocCountError); + if (getShowDocCountError()) { + for (B bucket : terms.getBuckets()) { + // If there is already a doc count error for this bucket + // subtract this aggs doc count error from it to make the + // new value for the bucket. This then means that when the + // final error for the bucket is calculated below we account + // for the existing error calculated in a previous reduce. + // Note that if the error is unbounded (-1) this will be fixed + // later in this method. 
+ bucket.updateDocCountError(-thisAggDocCountError); + } } if (terms.getBuckets().isEmpty() == false) { bucketsList.add(terms.getBuckets()); @@ -319,17 +321,17 @@ public InternalAggregation get() { result.add(bucket.reduced(AbstractInternalTerms.this::reduceBucket, reduceContext)); }); } - for (B r : result) { - if (sumDocCountError == -1) { - r.setDocCountError(-1); - } else { - r.updateDocCountError(sumDocCountError); + if (getShowDocCountError()) { + for (B r : result) { + if (sumDocCountError == -1) { + r.setDocCountError(-1); + } else { + r.updateDocCountError(sumDocCountError); + } } } - long docCountError; - if (sumDocCountError == -1) { - docCountError = -1; - } else { + long docCountError = -1; + if (sumDocCountError != -1) { docCountError = size == 1 ? 0 : sumDocCountError; } return create(name, result, reduceContext.isFinalReduce() ? getOrder() : thisReduceOrder, docCountError, otherDocCount[0]); @@ -349,7 +351,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { b -> createBucket( samplingContext.scaleUp(b.getDocCount()), InternalAggregations.finalizeSampling(b.getAggregations(), samplingContext), - b.getShowDocCountError() ? samplingContext.scaleUp(b.getDocCountError()) : 0, + getShowDocCountError() ? samplingContext.scaleUp(b.getDocCountError()) : 0, b ) ) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index 9789a9edc58f7..5c28c25de6e87 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -164,8 +164,8 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.term, prototype.docCount, aggregations, - prototype.showDocCountError, - prototype.docCountError, + showTermDocCountError, + prototype.getDocCountError(), prototype.format ); } @@ -216,6 +216,6 @@ public void close() { @Override protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, DoubleTerms.Bucket prototype) { - return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format); + return new Bucket(prototype.term, docCount, aggs, showTermDocCountError, docCountError, format); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index db9da6ed67207..5a79155d1d4f5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -880,7 +880,6 @@ StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp, GlobalOrdLookup BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd)); StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); result.bucketOrd = temp.bucketOrd; - result.docCountError = 0; return result; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index 5b9403840dfff..d7087a121b4f4 
100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -87,7 +87,10 @@ protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { writeSize(shardSize, out); out.writeBoolean(showTermDocCountError); out.writeVLong(otherDocCount); - out.writeCollection(buckets); + out.writeVInt(buckets.size()); + for (var bucket : buckets) { + bucket.writeTo(out, showTermDocCountError); + } } @Override @@ -95,6 +98,11 @@ protected void setDocCountError(long docCountError) { this.docCountError = docCountError; } + @Override + protected boolean getShowDocCountError() { + return showTermDocCountError; + } + @Override protected int getShardSize() { return shardSize; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java index 64cebee880141..7859319f4dd0d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.SetBackedScalingCuckooFilter; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.BucketOrder; @@ -29,10 +30,11 @@ public abstract class InternalRareTerms, B ext implements RareTerms { - public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket + public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucketWritable implements RareTerms.Bucket, - KeyComparable { + KeyComparable, + Writeable { /** * Reads a bucket. Should be a constructor reference. 
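* For example, a constructor reference such as {@code LongRareTerms.Bucket::new} (a hypothetical illustration).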
*/ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index 3f579947248bb..6c0eb465d1f80 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -45,7 +45,7 @@ public abstract class InternalSignificantTerms> extends InternalMultiBucketAggregation.InternalBucket + public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucketWritable implements SignificantTerms.Bucket { /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index b94b1f5ea40b1..739f0b923eaab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -41,9 +41,8 @@ public interface Reader> { long bucketOrd; protected long docCount; - protected long docCountError; + private long docCountError; protected InternalAggregations aggregations; - protected final boolean showDocCountError; protected final DocValueFormat format; protected Bucket( @@ -53,29 +52,23 @@ protected Bucket( long docCountError, DocValueFormat formatter ) { - this.showDocCountError = showDocCountError; this.format = formatter; this.docCount = docCount; this.aggregations = aggregations; - this.docCountError = docCountError; + this.docCountError = showDocCountError ? docCountError : -1; } /** * Read from a stream. */ protected Bucket(StreamInput in, DocValueFormat formatter, boolean showDocCountError) throws IOException { - this.showDocCountError = showDocCountError; this.format = formatter; docCount = in.readVLong(); - docCountError = -1; - if (showDocCountError) { - docCountError = in.readLong(); - } + docCountError = showDocCountError ? in.readLong() : -1; aggregations = InternalAggregations.readFrom(in); } - @Override - public final void writeTo(StreamOutput out) throws IOException { + final void writeTo(StreamOutput out, boolean showDocCountError) throws IOException { out.writeVLong(getDocCount()); if (showDocCountError) { out.writeLong(docCountError); @@ -105,9 +98,6 @@ public void setBucketOrd(long bucketOrd) { @Override public long getDocCountError() { - if (showDocCountError == false) { - throw new IllegalStateException("show_terms_doc_count_error is false"); - } return docCountError; } @@ -121,11 +111,6 @@ protected void updateDocCountError(long docCountErrorDiff) { this.docCountError += docCountErrorDiff; } - @Override - protected boolean getShowDocCountError() { - return showDocCountError; - } - @Override public InternalAggregations getAggregations() { return aggregations; @@ -155,23 +140,15 @@ public boolean equals(Object obj) { return false; } Bucket that = (Bucket) obj; - if (showDocCountError && docCountError != that.docCountError) { - /* - * docCountError doesn't matter if not showing it and - * serialization sets it to -1 no matter what it was - * before. 
- */ - return false; - } - return Objects.equals(docCount, that.docCount) - && Objects.equals(showDocCountError, that.showDocCountError) + return Objects.equals(docCountError, that.docCountError) + && Objects.equals(docCount, that.docCount) && Objects.equals(format, that.format) && Objects.equals(aggregations, that.aggregations); } @Override public int hashCode() { - return Objects.hash(getClass(), docCount, format, showDocCountError, showDocCountError ? docCountError : -1, aggregations); + return Objects.hash(getClass(), docCount, format, docCountError, aggregations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index f536b7f958ca2..6c2444379c8eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -178,8 +178,8 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.term, prototype.docCount, aggregations, - prototype.showDocCountError, - prototype.docCountError, + showTermDocCountError, + prototype.getDocCountError(), prototype.format ); } @@ -260,7 +260,7 @@ public InternalAggregation get() { @Override protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, LongTerms.Bucket prototype) { - return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format); + return new Bucket(prototype.term, docCount, aggs, showTermDocCountError, docCountError, format); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 5faf6e0aaaedf..2370827230c47 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -184,15 +184,15 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.termBytes, prototype.docCount, aggregations, - prototype.showDocCountError, - prototype.docCountError, + showTermDocCountError, + prototype.getDocCountError(), prototype.format ); } @Override protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, StringTerms.Bucket prototype) { - return new Bucket(prototype.termBytes, docCount, aggs, prototype.showDocCountError, docCountError, format); + return new Bucket(prototype.termBytes, docCount, aggs, showTermDocCountError, docCountError, format); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 8047d1f06990f..e82a2b7fe9235 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -114,6 +114,11 @@ public final XContentBuilder doXContentBody(XContentBuilder builder, Params para return doXContentCommon(builder, params, false, 0L, 0, Collections.emptyList()); } + @Override + protected boolean getShowDocCountError() { + return false; + } + @Override protected void setDocCountError(long docCountError) {} diff --git 
a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java index 93c40185f62ac..37eb69c0ca409 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java @@ -947,7 +947,7 @@ private static String innerDetect( long currentThreadId ) throws Exception { try (var writer = new StringWriter()) { - hotThreads.innerDetect(mockedMthreadMXBeanBean, sunThreadInfo, currentThreadId, (interval) -> null, writer); + hotThreads.innerDetect(mockedMthreadMXBeanBean, sunThreadInfo, currentThreadId, writer, () -> {}); return writer.toString(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java index b2f79c02baf8d..626adc9a7c41c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; @@ -56,10 +55,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws }; InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() { - @Override - public void writeTo(StreamOutput out) throws IOException { - - } @Override public Object getKey() { @@ -131,10 +126,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws }; InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() { - @Override - public void writeTo(StreamOutput out) throws IOException { - - } @Override public Object getKey() { diff --git a/settings.gradle b/settings.gradle index 7bf03263031f1..4722fc311480a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -86,6 +86,7 @@ List projects = [ 'distribution:tools:ansi-console', 'server', 'test:framework', + 'test:fixtures:aws-sts-fixture', 'test:fixtures:azure-fixture', 'test:fixtures:ec2-imds-fixture', 'test:fixtures:gcs-fixture', diff --git a/test/fixtures/aws-sts-fixture/build.gradle b/test/fixtures/aws-sts-fixture/build.gradle new file mode 100644 index 0000000000000..57f0f8fe25493 --- /dev/null +++ b/test/fixtures/aws-sts-fixture/build.gradle @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ +apply plugin: 'elasticsearch.java' + +description = 'Fixture for emulating the Security Token Service (STS) running in AWS' + +dependencies { + api project(':server') + api("junit:junit:${versions.junit}") { + transitive = false + } + api project(':test:framework') +} diff --git a/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpFixture.java b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpFixture.java new file mode 100644 index 0000000000000..13ba7eaf8ba67 --- /dev/null +++ b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpFixture.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package fixture.aws.sts; + +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; + +import org.junit.rules.ExternalResource; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Objects; +import java.util.function.BiConsumer; + +public class AwsStsHttpFixture extends ExternalResource { + + private HttpServer server; + + private final BiConsumer newCredentialsConsumer; + private final String webIdentityToken; + + public AwsStsHttpFixture(BiConsumer newCredentialsConsumer, String webIdentityToken) { + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.webIdentityToken = Objects.requireNonNull(webIdentityToken); + } + + protected HttpHandler createHandler() { + return new AwsStsHttpHandler(newCredentialsConsumer, webIdentityToken); + } + + public String getAddress() { + return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort(); + } + + public void stop(int delay) { + server.stop(delay); + } + + protected void before() throws Throwable { + server = HttpServer.create(resolveAddress(), 0); + server.createContext("/", Objects.requireNonNull(createHandler())); + server.start(); + } + + @Override + protected void after() { + stop(0); + } + + private static InetSocketAddress resolveAddress() { + try { + return new InetSocketAddress(InetAddress.getByName("localhost"), 0); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } +} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java similarity index 66% rename from test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java rename to test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java index 54e0be1e321a2..84541f5e15211 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java +++ b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java @@ -6,12 +6,16 @@ * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -package fixture.s3; +package fixture.aws.sts; +import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.time.ZonedDateTime; @@ -19,53 +23,39 @@ import java.util.Arrays; import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; import java.util.stream.Collectors; -public class S3HttpFixtureWithSTS extends S3HttpFixture { +import static org.elasticsearch.test.ESTestCase.randomIdentifier; - private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; - private static final String ROLE_NAME = "sts-fixture-test"; - private final String sessionToken; - private final String webIdentityToken; +/** + * Minimal HTTP handler that emulates the AWS STS server + */ +@SuppressForbidden(reason = "this test uses a HttpServer to emulate the AWS STS endpoint") +public class AwsStsHttpHandler implements HttpHandler { - public S3HttpFixtureWithSTS() { - this(true); - } + static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; + static final String ROLE_NAME = "sts-fixture-test"; - public S3HttpFixtureWithSTS(boolean enabled) { - this( - enabled, - "sts_bucket", - "sts_base_path", - "sts_access_key", - "sts_session_token", - "Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ" - ); - } + private final BiConsumer newCredentialsConsumer; + private final String webIdentityToken; - public S3HttpFixtureWithSTS( - boolean enabled, - String bucket, - String basePath, - String accessKey, - String sessionToken, - String webIdentityToken - ) { - super(enabled, bucket, basePath, accessKey); - this.sessionToken = sessionToken; - this.webIdentityToken = webIdentityToken; + public AwsStsHttpHandler(BiConsumer newCredentialsConsumer, String webIdentityToken) { + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.webIdentityToken = Objects.requireNonNull(webIdentityToken); } @Override - protected HttpHandler createHandler() { - final HttpHandler delegate = super.createHandler(); + public void handle(final HttpExchange exchange) throws IOException { + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + + try (exchange) { + final var requestMethod = exchange.getRequestMethod(); + final var path = exchange.getRequestURI().getPath(); + + if ("POST".equals(requestMethod) && "/assume-role-with-web-identity/".equals(path)) { - return exchange -> { - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - // It's run as a separate service, but we emulate it under the `assume-role-with-web-identity` endpoint - // of the S3 serve for the simplicity sake - if ("POST".equals(exchange.getRequestMethod()) - && exchange.getRequestURI().getPath().startsWith("/assume-role-with-web-identity")) { String body = new String(exchange.getRequestBody().readAllBytes(), StandardCharsets.UTF_8); Map params = Arrays.stream(body.split("&")) .map(e -> 
e.split("=")) @@ -82,6 +72,9 @@ protected HttpHandler createHandler() { exchange.close(); return; } + final var accessKey = randomIdentifier(); + final var sessionToken = randomIdentifier(); + newCredentialsConsumer.accept(accessKey, sessionToken); final byte[] response = String.format( Locale.ROOT, """ @@ -95,7 +88,7 @@ protected HttpHandler createHandler() { <SessionToken>%s</SessionToken> - <SecretAccessKey>secret_access_key</SecretAccessKey> + <SecretAccessKey>%s</SecretAccessKey> <Expiration>%s</Expiration> <AccessKeyId>%s</AccessKeyId> @@ -109,6 +102,7 @@ protected HttpHandler createHandler() { ROLE_ARN, ROLE_NAME, sessionToken, + randomIdentifier(), ZonedDateTime.now().plusDays(1L).format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssZ")), accessKey ).getBytes(StandardCharsets.UTF_8); @@ -118,7 +112,8 @@ protected HttpHandler createHandler() { exchange.close(); return; } - delegate.handle(exchange); - }; + + ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError("not supported: " + requestMethod + " " + path)); + } } } diff --git a/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java b/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java new file mode 100644 index 0000000000000..4094ce18e7aef --- /dev/null +++ b/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java @@ -0,0 +1,268 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.aws.sts; + +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpContext; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpPrincipal; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.containsString; + +public class AwsStsHttpHandlerTests extends ESTestCase { + + public void testGenerateCredentials() { + final Map generatedCredentials = new HashMap<>(); + + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler(generatedCredentials::put, webIdentityToken); + + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + AwsStsHttpHandler.ROLE_NAME, + "RoleArn", + AwsStsHttpHandler.ROLE_ARN, + "WebIdentityToken", + webIdentityToken + ) + ); + assertEquals(RestStatus.OK, response.status()); + + assertThat(generatedCredentials, aMapWithSize(1)); + final var accessKey = generatedCredentials.keySet().iterator().next(); + final var sessionToken = generatedCredentials.values().iterator().next(); + + final var responseBody = response.body().utf8ToString(); + assertThat(responseBody, containsString("<AccessKeyId>" + accessKey + 
"</AccessKeyId>")); + assertThat(responseBody, containsString("<SessionToken>" + sessionToken + "</SessionToken>")); + } + + public void testInvalidAction() { + final var handler = new AwsStsHttpHandler((key, token) -> fail(), randomUnicodeOfLength(10)); + final var response = handleRequest(handler, Map.of("Action", "Unsupported")); + assertEquals(RestStatus.BAD_REQUEST, response.status()); + } + + public void testInvalidRole() { + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler((key, token) -> fail(), webIdentityToken); + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + randomValueOtherThan(AwsStsHttpHandler.ROLE_NAME, ESTestCase::randomIdentifier), + "RoleArn", + AwsStsHttpHandler.ROLE_ARN, + "WebIdentityToken", + webIdentityToken + ) + ); + assertEquals(RestStatus.UNAUTHORIZED, response.status()); + } + + public void testInvalidToken() { + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler((key, token) -> fail(), webIdentityToken); + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + AwsStsHttpHandler.ROLE_NAME, + "RoleArn", + AwsStsHttpHandler.ROLE_ARN, + "WebIdentityToken", + randomValueOtherThan(webIdentityToken, () -> randomUnicodeOfLength(10)) + ) + ); + assertEquals(RestStatus.UNAUTHORIZED, response.status()); + } + + public void testInvalidARN() { + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler((key, token) -> fail(), webIdentityToken); + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + AwsStsHttpHandler.ROLE_NAME, + "RoleArn", + randomValueOtherThan(AwsStsHttpHandler.ROLE_ARN, ESTestCase::randomIdentifier), + "WebIdentityToken", + webIdentityToken + ) + ); + assertEquals(RestStatus.UNAUTHORIZED, response.status()); + } + + private record TestHttpResponse(RestStatus status, BytesReference body) {} + + private static TestHttpResponse handleRequest(AwsStsHttpHandler handler, Map body) { + final var httpExchange = new TestHttpExchange( + "POST", + "/assume-role-with-web-identity/", + new BytesArray( + body.entrySet() + .stream() + .map(e -> e.getKey() + "=" + URLEncoder.encode(e.getValue(), StandardCharsets.UTF_8)) + .collect(Collectors.joining("&")) + ), + TestHttpExchange.EMPTY_HEADERS + ); + try { + handler.handle(httpExchange); + } catch (IOException e) { + fail(e); + } + assertNotEquals(0, httpExchange.getResponseCode()); + return new TestHttpResponse(RestStatus.fromCode(httpExchange.getResponseCode()), httpExchange.getResponseBodyContents()); + } + + private static class TestHttpExchange extends HttpExchange { + + private static final Headers EMPTY_HEADERS = new Headers(); + + private final String method; + private final URI uri; + private final BytesReference requestBody; + private final Headers requestHeaders; + + private final Headers responseHeaders = new Headers(); + private final BytesStreamOutput responseBody = new BytesStreamOutput(); + private int responseCode; + + TestHttpExchange(String method, String uri, BytesReference requestBody, Headers requestHeaders) { + this.method = method; + this.uri = URI.create(uri); + this.requestBody = requestBody; + this.requestHeaders = requestHeaders; + } + + @Override + public Headers getRequestHeaders() { + return requestHeaders; + } + + @Override + public Headers getResponseHeaders() { + return 
responseHeaders; + } + + @Override + public URI getRequestURI() { + return uri; + } + + @Override + public String getRequestMethod() { + return method; + } + + @Override + public HttpContext getHttpContext() { + return null; + } + + @Override + public void close() {} + + @Override + public InputStream getRequestBody() { + try { + return requestBody.streamInput(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public OutputStream getResponseBody() { + return responseBody; + } + + @Override + public void sendResponseHeaders(int rCode, long responseLength) { + this.responseCode = rCode; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public int getResponseCode() { + return responseCode; + } + + public BytesReference getResponseBodyContents() { + return responseBody.bytes(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public String getProtocol() { + return "HTTP/1.1"; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public void setAttribute(String name, Object value) { + fail("setAttribute not implemented"); + } + + @Override + public void setStreams(InputStream i, OutputStream o) { + fail("setStreams not implemented"); + } + + @Override + public HttpPrincipal getPrincipal() { + fail("getPrincipal not implemented"); + throw new UnsupportedOperationException("getPrincipal not implemented"); + } + } + +} diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java index 68f46d778018c..13d36c6fc4812 100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java @@ -18,23 +18,22 @@ import java.net.UnknownHostException; import java.util.Objects; import java.util.Set; +import java.util.function.BiConsumer; public class Ec2ImdsHttpFixture extends ExternalResource { private HttpServer server; - private final String accessKey; - private final String sessionToken; + private final BiConsumer newCredentialsConsumer; private final Set alternativeCredentialsEndpoints; - public Ec2ImdsHttpFixture(String accessKey, String sessionToken, Set alternativeCredentialsEndpoints) { - this.accessKey = accessKey; - this.sessionToken = sessionToken; - this.alternativeCredentialsEndpoints = alternativeCredentialsEndpoints; + public Ec2ImdsHttpFixture(BiConsumer newCredentialsConsumer, Set alternativeCredentialsEndpoints) { + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.alternativeCredentialsEndpoints = Objects.requireNonNull(alternativeCredentialsEndpoints); } protected HttpHandler createHandler() { - return new Ec2ImdsHttpHandler(accessKey, sessionToken, alternativeCredentialsEndpoints); + return new Ec2ImdsHttpHandler(newCredentialsConsumer, alternativeCredentialsEndpoints); } public String getAddress() { diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java index 04e5e83bddfa9..a92f1bdc5f9ae 100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java @@ -25,6 +25,7 @@ import 
java.util.Collection; import java.util.Objects; import java.util.Set; +import java.util.function.BiConsumer; import static org.elasticsearch.test.ESTestCase.randomIdentifier; @@ -36,13 +37,11 @@ public class Ec2ImdsHttpHandler implements HttpHandler { private static final String IMDS_SECURITY_CREDENTIALS_PATH = "/latest/meta-data/iam/security-credentials/"; - private final String accessKey; - private final String sessionToken; + private final BiConsumer newCredentialsConsumer; private final Set validCredentialsEndpoints = ConcurrentCollections.newConcurrentSet(); - public Ec2ImdsHttpHandler(String accessKey, String sessionToken, Collection alternativeCredentialsEndpoints) { - this.accessKey = Objects.requireNonNull(accessKey); - this.sessionToken = Objects.requireNonNull(sessionToken); + public Ec2ImdsHttpHandler(BiConsumer newCredentialsConsumer, Collection alternativeCredentialsEndpoints) { + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); this.validCredentialsEndpoints.addAll(alternativeCredentialsEndpoints); } @@ -70,6 +69,9 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.getResponseBody().write(response); return; } else if (validCredentialsEndpoints.contains(path)) { + final String accessKey = randomIdentifier(); + final String sessionToken = randomIdentifier(); + newCredentialsConsumer.accept(accessKey, sessionToken); final byte[] response = Strings.format( """ { diff --git a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java index 5d5cbfae3fa60..369b0ef449b2f 100644 --- a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java +++ b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java @@ -28,15 +28,18 @@ import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.URI; +import java.util.HashMap; +import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.aMapWithSize; + public class Ec2ImdsHttpHandlerTests extends ESTestCase { public void testImdsV1() throws IOException { - final var accessKey = randomIdentifier(); - final var sessionToken = randomIdentifier(); + final Map generatedCredentials = new HashMap<>(); - final var handler = new Ec2ImdsHttpHandler(accessKey, sessionToken, Set.of()); + final var handler = new Ec2ImdsHttpHandler(generatedCredentials::put, Set.of()); final var roleResponse = handleRequest(handler, "GET", "/latest/meta-data/iam/security-credentials/"); assertEquals(RestStatus.OK, roleResponse.status()); @@ -46,6 +49,10 @@ public void testImdsV1() throws IOException { final var credentialsResponse = handleRequest(handler, "GET", "/latest/meta-data/iam/security-credentials/" + profileName); assertEquals(RestStatus.OK, credentialsResponse.status()); + assertThat(generatedCredentials, aMapWithSize(1)); + final var accessKey = generatedCredentials.keySet().iterator().next(); + final var sessionToken = generatedCredentials.values().iterator().next(); + final var responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), credentialsResponse.body().streamInput(), false); assertEquals(Set.of("AccessKeyId", "Expiration", "RoleArn", "SecretAccessKey", "Token"), responseMap.keySet()); assertEquals(accessKey, responseMap.get("AccessKeyId")); @@ -55,7 +62,7 @@ public void testImdsV1() throws IOException { public void testImdsV2Disabled() { assertEquals( 
RestStatus.METHOD_NOT_ALLOWED, - handleRequest(new Ec2ImdsHttpHandler(randomIdentifier(), randomIdentifier(), Set.of()), "PUT", "/latest/api/token").status() + handleRequest(new Ec2ImdsHttpHandler((accessKey, sessionToken) -> fail(), Set.of()), "PUT", "/latest/api/token").status() ); } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/DynamicS3Credentials.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/DynamicS3Credentials.java new file mode 100644 index 0000000000000..4e8f267ad3543 --- /dev/null +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/DynamicS3Credentials.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.s3; + +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; + +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Allows dynamic creation of access-key/session-token credentials for accessing AWS services such as S3. Typically there's one service + * (e.g. IMDS or STS) which creates credentials dynamically and registers them here using {@link #addValidCredentials}, and then the + * {@link S3HttpFixture} uses {@link #isAuthorized} to validate that the credentials it receives correspond with some previously-generated + * credentials. + */ +public class DynamicS3Credentials { + private final Map> validCredentialsMap = ConcurrentCollections.newConcurrentMap(); + + public boolean isAuthorized(String authorizationHeader, String sessionTokenHeader) { + return authorizationHeader != null + && sessionTokenHeader != null + && validCredentialsMap.getOrDefault(sessionTokenHeader, Set.of()).stream().anyMatch(authorizationHeader::contains); + } + + public void addValidCredentials(String accessKey, String sessionToken) { + validCredentialsMap.computeIfAbsent( + Objects.requireNonNull(sessionToken, "sessionToken"), + t -> ConcurrentCollections.newConcurrentSet() + ).add(Objects.requireNonNull(accessKey, "accessKey")); + } +} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java index 421478a53e6bc..36f8fedcb3335 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java @@ -21,6 +21,8 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Objects; +import java.util.function.BiPredicate; +import java.util.function.Supplier; public class S3HttpFixture extends ExternalResource { @@ -29,21 +31,21 @@ public class S3HttpFixture extends ExternalResource { private final boolean enabled; private final String bucket; private final String basePath; - protected volatile String accessKey; + private final BiPredicate authorizationPredicate; public S3HttpFixture() { this(true); } public S3HttpFixture(boolean enabled) { - this(enabled, "bucket", "base_path_integration_tests", "s3_test_access_key"); + this(enabled, "bucket", "base_path_integration_tests", fixedAccessKey("s3_test_access_key")); } - public S3HttpFixture(boolean enabled, String bucket, 
String basePath, String accessKey) { + public S3HttpFixture(boolean enabled, String bucket, String basePath, BiPredicate authorizationPredicate) { this.enabled = enabled; this.bucket = bucket; this.basePath = basePath; - this.accessKey = accessKey; + this.authorizationPredicate = authorizationPredicate; } protected HttpHandler createHandler() { @@ -51,9 +53,11 @@ protected HttpHandler createHandler() { @Override public void handle(final HttpExchange exchange) throws IOException { try { - final String authorization = exchange.getRequestHeaders().getFirst("Authorization"); - if (authorization == null || authorization.contains(accessKey) == false) { - sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "Bad access key"); + if (authorizationPredicate.test( + exchange.getRequestHeaders().getFirst("Authorization"), + exchange.getRequestHeaders().getFirst("x-amz-security-token") + ) == false) { + sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "Access denied by " + authorizationPredicate); return; } super.handle(exchange); @@ -76,7 +80,7 @@ public void stop(int delay) { protected void before() throws Throwable { if (enabled) { - InetSocketAddress inetSocketAddress = resolveAddress("localhost", 0); + InetSocketAddress inetSocketAddress = resolveAddress(); this.server = HttpServer.create(inetSocketAddress, 0); HttpHandler handler = createHandler(); this.server.createContext("/", Objects.requireNonNull(handler)); @@ -91,15 +95,27 @@ protected void after() { } } - private static InetSocketAddress resolveAddress(String address, int port) { + private static InetSocketAddress resolveAddress() { try { - return new InetSocketAddress(InetAddress.getByName(address), port); + return new InetSocketAddress(InetAddress.getByName("localhost"), 0); } catch (UnknownHostException e) { throw new RuntimeException(e); } } - public void setAccessKey(String accessKey) { - this.accessKey = accessKey; + public static BiPredicate fixedAccessKey(String accessKey) { + return mutableAccessKey(() -> accessKey); + } + + public static BiPredicate mutableAccessKey(Supplier accessKeySupplier) { + return (authorizationHeader, sessionTokenHeader) -> authorizationHeader != null + && authorizationHeader.contains(accessKeySupplier.get()); + } + + public static BiPredicate fixedAccessKeyAndToken(String accessKey, String sessionToken) { + Objects.requireNonNull(sessionToken); + final var accessKeyPredicate = fixedAccessKey(accessKey); + return (authorizationHeader, sessionTokenHeader) -> accessKeyPredicate.test(authorizationHeader, sessionTokenHeader) + && sessionToken.equals(sessionTokenHeader); } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java deleted file mode 100644 index 001cc34d9b20d..0000000000000 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
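The DynamicS3Credentials registry and the new BiPredicate-based S3HttpFixture constructor above are designed to compose. A minimal sketch of the intended wiring, assuming these fixture classes are on the test classpath (the containing class and the literal bucket/path values are illustrative only, not part of this change):

import fixture.s3.DynamicS3Credentials;
import fixture.s3.S3HttpFixture;

class DynamicCredentialsWiringSketch {
    static S3HttpFixture s3FixtureWithDynamicCredentials() {
        // Credential producers (e.g. the IMDS or STS handlers above) call
        // addValidCredentials(accessKey, sessionToken) whenever they mint a pair.
        DynamicS3Credentials dynamicCredentials = new DynamicS3Credentials();

        // The fixture then accepts any request whose Authorization and
        // x-amz-security-token headers match a previously registered pair.
        return new S3HttpFixture(true, "bucket", "base_path", dynamicCredentials::isAuthorized);
    }
}

Fixed credentials remain available through the fixedAccessKey and fixedAccessKeyAndToken factories shown above, while dynamically minted ones flow through the shared registry.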
- */ -package fixture.s3; - -import com.sun.net.httpserver.HttpHandler; - -import org.elasticsearch.rest.RestStatus; - -import static fixture.s3.S3HttpHandler.sendError; - -public class S3HttpFixtureWithSessionToken extends S3HttpFixture { - - protected final String sessionToken; - - public S3HttpFixtureWithSessionToken(String bucket, String basePath, String accessKey, String sessionToken) { - super(true, bucket, basePath, accessKey); - this.sessionToken = sessionToken; - } - - @Override - protected HttpHandler createHandler() { - final HttpHandler delegate = super.createHandler(); - return exchange -> { - final String securityToken = exchange.getRequestHeaders().getFirst("x-amz-security-token"); - if (securityToken == null) { - sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "No session token"); - return; - } - if (securityToken.equals(sessionToken) == false) { - sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "Bad session token"); - return; - } - delegate.handle(exchange); - }; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index d7c5c598ce978..af92eae8c8a19 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -281,7 +281,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Annotation for third-party integration tests. *

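As an aside, a hedged sketch of how an annotation like this is typically applied; the test class below and the tests.thirdparty gate are assumptions based on convention, not part of this change, and the annotated javadoc continues below:

@ESIntegTestCase.ThirdParty
public class RabbitMqThirdPartyIT extends ESIntegTestCase {
    // Hypothetical third-party test: expected to run only when third-party
    // tests are enabled (e.g. -Dtests.thirdparty=true) and any external
    // settings are supplied via the tests.config system property.
}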
- * These are tests the require a third-party service in order to run. They + * These are tests that require a third-party service in order to run. They * may require the user to manually configure an external process (such as rabbitmq), * or may additionally require some external configuration (e.g. AWS credentials) * via the {@code tests.config} system property. diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java index c6bfb5b1b2778..0d42a2856a10e 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java @@ -42,8 +42,7 @@ public static class Bucket extends AbstractInternalTerms.AbstractTermsBucket formats; protected List terms; protected List keyConverters; @@ -60,8 +59,7 @@ public Bucket( this.terms = terms; this.docCount = docCount; this.aggregations = aggregations; - this.showDocCountError = showDocCountError; - this.docCountError = docCountError; + this.docCountError = showDocCountError ? docCountError : -1; this.formats = formats; this.keyConverters = keyConverters; } @@ -71,7 +69,6 @@ protected Bucket(StreamInput in, List formats, List formats, List listener) { - final long reservedBytes = estimatedPageSizeInBytes.get(); + final long reservedBytes = allSourcesFinished ? 0 : estimatedPageSizeInBytes.get(); if (reservedBytes > 0) { // This doesn't fully protect ESQL from OOM, but reduces the likelihood. blockFactory.breaker().addEstimateBytesAndMaybeBreak(reservedBytes, "fetch page"); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index 757a3262433c8..614c3fe0ecc5c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -93,7 +93,7 @@ public IsBlockedResult waitForWriting() { * @param sourceFinished if true, then this handler can finish as sources have enough pages. 
* @param listener the listener that will be notified when pages are ready or this handler is finished * @see RemoteSink - * @see ExchangeSourceHandler#addRemoteSink(RemoteSink, int) + * @see ExchangeSourceHandler#addRemoteSink(RemoteSink, boolean, int, ActionListener) */ public void fetchPageAsync(boolean sourceFinished, ActionListener listener) { if (sourceFinished) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 4baaf9ad89bd6..61b3386ce0274 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -24,10 +24,10 @@ /** * An {@link ExchangeSourceHandler} asynchronously fetches pages and status from multiple {@link RemoteSink}s * and feeds them to its {@link ExchangeSource}, which are created using the {@link #createExchangeSource()}) method. - * {@link RemoteSink}s are added using the {@link #addRemoteSink(RemoteSink, int)}) method. + * {@link RemoteSink}s are added using the {@link #addRemoteSink(RemoteSink, boolean, int, ActionListener)}) method. * * @see #createExchangeSource() - * @see #addRemoteSink(RemoteSink, int) + * @see #addRemoteSink(RemoteSink, boolean, int, ActionListener) */ public final class ExchangeSourceHandler { private final ExchangeBuffer buffer; @@ -35,13 +35,43 @@ public final class ExchangeSourceHandler { private final PendingInstances outstandingSinks; private final PendingInstances outstandingSources; + // Collect failures that occur while fetching pages from the remote sink with `failFast=true`. + // The exchange source will stop fetching and abort as soon as any failure is added to this failure collector. + // The final failure collected will be notified to callers via the {@code completionListener}. private final FailureCollector failure = new FailureCollector(); - public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) { + /** + * Creates a new ExchangeSourceHandler. + * + * @param maxBufferSize the maximum size of the exchange buffer. A larger buffer reduces ``pauses`` but uses more memory, + * which could otherwise be allocated for other purposes. + * @param fetchExecutor the executor used to fetch pages. 
+ * @param completionListener a listener that will be notified when the exchange source handler fails or completes + */ + public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor, ActionListener completionListener) { this.buffer = new ExchangeBuffer(maxBufferSize); this.fetchExecutor = fetchExecutor; this.outstandingSinks = new PendingInstances(() -> buffer.finish(false)); this.outstandingSources = new PendingInstances(() -> buffer.finish(true)); + buffer.addCompletionListener(ActionListener.running(() -> { + final ActionListener listener = ActionListener.assertAtLeastOnce(completionListener).delegateFailure((l, unused) -> { + final Exception e = failure.getFailure(); + if (e != null) { + l.onFailure(e); + } else { + l.onResponse(null); + } + }); + try (RefCountingListener refs = new RefCountingListener(listener)) { + for (PendingInstances pending : List.of(outstandingSinks, outstandingSources)) { + // Create an outstanding instance and then finish to complete the completionListener + // if we haven't registered any instances of exchange sinks or exchange sources before. + pending.trackNewInstance(); + pending.completion.addListener(refs.acquire()); + pending.finishInstance(); + } + } + })); } private class ExchangeSourceImpl implements ExchangeSource { @@ -89,20 +119,6 @@ public int bufferSize() { } } - public void addCompletionListener(ActionListener listener) { - buffer.addCompletionListener(ActionListener.running(() -> { - try (RefCountingListener refs = new RefCountingListener(listener)) { - for (PendingInstances pending : List.of(outstandingSinks, outstandingSources)) { - // Create an outstanding instance and then finish to complete the completionListener - // if we haven't registered any instances of exchange sinks or exchange sources before. - pending.trackNewInstance(); - pending.completion.addListener(refs.acquire()); - pending.finishInstance(); - } - } - })); - } - /** * Create a new {@link ExchangeSource} for exchanging data * @@ -159,10 +175,14 @@ void exited() { private final class RemoteSinkFetcher { private volatile boolean finished = false; private final RemoteSink remoteSink; + private final boolean failFast; + private final ActionListener completionListener; - RemoteSinkFetcher(RemoteSink remoteSink) { + RemoteSinkFetcher(RemoteSink remoteSink, boolean failFast, ActionListener completionListener) { outstandingSinks.trackNewInstance(); this.remoteSink = remoteSink; + this.failFast = failFast; + this.completionListener = completionListener; } void fetchPage() { @@ -198,15 +218,22 @@ void fetchPage() { } void onSinkFailed(Exception e) { - failure.unwrapAndCollect(e); + if (failFast) { + failure.unwrapAndCollect(e); + } buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading - onSinkComplete(); + if (finished == false) { + finished = true; + outstandingSinks.finishInstance(); + completionListener.onFailure(e); + } } void onSinkComplete() { if (finished == false) { finished = true; outstandingSinks.finishInstance(); + completionListener.onResponse(null); } } } @@ -215,23 +242,36 @@ void onSinkComplete() { * Add a remote sink as a new data source of this handler. The handler will start fetching data from this remote sink immediately. * * @param remoteSink the remote sink - * @param instances the number of concurrent ``clients`` that this handler should use to fetch pages. More clients reduce latency, - but add overhead. 
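Before the parameter notes continue below, a compact sketch of the reworked lifecycle may help: completion of the whole exchange is now observed through the constructor-supplied listener instead of the removed addCompletionListener method. The wrapper class, buffer size, and sink handle here are illustrative assumptions, not part of this change:

import java.util.concurrent.Executor;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler;
import org.elasticsearch.compute.operator.exchange.RemoteSink;

class ExchangeLifecycleSketch {
    static PlainActionFuture<Void> wire(Executor fetchExecutor, RemoteSink remoteSink) {
        PlainActionFuture<Void> completion = new PlainActionFuture<>();
        // Fires once all sinks and sources attached to this handler finish or fail.
        ExchangeSourceHandler sourceHandler = new ExchangeSourceHandler(512, fetchExecutor, completion);
        // failFast=true: a failure from this sink aborts the whole exchange and is
        // reported via the completion future, so the per-sink listener can be a no-op.
        sourceHandler.addRemoteSink(remoteSink, true, 1, ActionListener.noop());
        return completion;
    }
}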
+ * @param failFast determines how failures in this remote sink are handled: + * - If {@code false}, failures from this remote sink will not cause the exchange source to abort. + * Callers must handle these failures notified via {@code listener}. + * - If {@code true}, failures from this remote sink will cause the exchange source to abort. + * Callers can safely ignore failures notified via this listener, as they are collected and + * reported by the exchange source. + * @param instances the number of concurrent ``clients`` that this handler should use to fetch pages. + * More clients reduce latency, but add overhead. + * @param listener a listener that will be notified when the sink fails or completes * @see ExchangeSinkHandler#fetchPageAsync(boolean, ActionListener) */ - public void addRemoteSink(RemoteSink remoteSink, int instances) { + public void addRemoteSink(RemoteSink remoteSink, boolean failFast, int instances, ActionListener listener) { + final ActionListener sinkListener = ActionListener.assertAtLeastOnce(ActionListener.notifyOnce(listener)); fetchExecutor.execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - failure.unwrapAndCollect(e); + if (failFast) { + failure.unwrapAndCollect(e); + } buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading + sinkListener.onFailure(e); } @Override protected void doRun() { - for (int i = 0; i < instances; i++) { - var fetcher = new RemoteSinkFetcher(remoteSink); - fetcher.fetchPage(); + try (RefCountingListener refs = new RefCountingListener(sinkListener)) { + for (int i = 0; i < instances; i++) { + var fetcher = new RemoteSinkFetcher(remoteSink, failFast, refs.acquire()); + fetcher.fetchPage(); + } } } }); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index c0396fdc469aa..542bf5bc384a5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java @@ -209,8 +209,19 @@ List createDriversForInput(List input, List results, boolean randomIntBetween(2, 10), threadPool.relativeTimeInMillisSupplier() ); - ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(randomIntBetween(1, 4), threadPool.executor(ESQL_TEST_EXECUTOR)); - sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); + ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler( + randomIntBetween(1, 4), + threadPool.executor(ESQL_TEST_EXECUTOR), + ActionListener.noop() + ); + sourceExchanger.addRemoteSink( + sinkExchanger::fetchPageAsync, + randomBoolean(), + 1, + ActionListener.noop().delegateResponse((l, e) -> { + throw new AssertionError("unexpected failure", e); + }) + ); Iterator intermediateOperatorItr; int itrSize = (splitInput.size() * 3) + 3; // 3 inter ops per initial source drivers, and 3 per final diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index 0b1ecce8c375b..8949f61b7420d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -56,6 +57,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.function.Supplier; @@ -63,6 +65,7 @@ import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; public class ExchangeServiceTests extends ESTestCase { @@ -94,11 +97,10 @@ public void testBasic() throws Exception { ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(blockFactory, 2, threadPool.relativeTimeInMillisSupplier()); ExchangeSink sink1 = sinkExchanger.createExchangeSink(); ExchangeSink sink2 = sinkExchanger.createExchangeSink(); - ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(3, threadPool.executor(ESQL_TEST_EXECUTOR)); PlainActionFuture sourceCompletion = new PlainActionFuture<>(); - sourceExchanger.addCompletionListener(sourceCompletion); + ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(3, threadPool.executor(ESQL_TEST_EXECUTOR), sourceCompletion); ExchangeSource source = sourceExchanger.createExchangeSource(); - sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); + sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, randomBoolean(), 1, ActionListener.noop()); SubscribableListener waitForReading = source.waitForReading().listener(); assertFalse(waitForReading.isDone()); assertNull(source.pollPage()); @@ -263,7 +265,7 @@ public void close() { } } - void runConcurrentTest( + Set runConcurrentTest( int maxInputSeqNo, int maxOutputSeqNo, Supplier exchangeSource, @@ -318,16 +320,17 @@ protected void start(Driver driver, ActionListener listener) { } }.runToCompletion(drivers, future); future.actionGet(TimeValue.timeValueMinutes(1)); - var expectedSeqNos = IntStream.range(0, Math.min(maxInputSeqNo, maxOutputSeqNo)).boxed().collect(Collectors.toSet()); - assertThat(seqNoCollector.receivedSeqNos, hasSize(expectedSeqNos.size())); - assertThat(seqNoCollector.receivedSeqNos, equalTo(expectedSeqNos)); + return seqNoCollector.receivedSeqNos; } public void testConcurrentWithHandlers() { BlockFactory blockFactory = blockFactory(); PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); - var sourceExchanger = new ExchangeSourceHandler(randomExchangeBuffer(), threadPool.executor(ESQL_TEST_EXECUTOR)); - sourceExchanger.addCompletionListener(sourceCompletionFuture); + var sourceExchanger = new ExchangeSourceHandler( + randomExchangeBuffer(), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); List sinkHandlers = new ArrayList<>(); Supplier exchangeSink = () -> { final ExchangeSinkHandler sinkHandler; @@ -335,17 +338,89 @@ public void testConcurrentWithHandlers() { sinkHandler = randomFrom(sinkHandlers); } else { sinkHandler = new ExchangeSinkHandler(blockFactory, randomExchangeBuffer(), 
threadPool.relativeTimeInMillisSupplier()); - sourceExchanger.addRemoteSink(sinkHandler::fetchPageAsync, randomIntBetween(1, 3)); + sourceExchanger.addRemoteSink(sinkHandler::fetchPageAsync, randomBoolean(), randomIntBetween(1, 3), ActionListener.noop()); sinkHandlers.add(sinkHandler); } return sinkHandler.createExchangeSink(); }; final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); final int maxOutputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); - runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceExchanger::createExchangeSource, exchangeSink); + Set actualSeqNos = runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceExchanger::createExchangeSource, exchangeSink); + var expectedSeqNos = IntStream.range(0, Math.min(maxInputSeqNo, maxOutputSeqNo)).boxed().collect(Collectors.toSet()); + assertThat(actualSeqNos, hasSize(expectedSeqNos.size())); + assertThat(actualSeqNos, equalTo(expectedSeqNos)); sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS); } + public void testExchangeSourceContinueOnFailure() { + BlockFactory blockFactory = blockFactory(); + PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); + var exchangeSourceHandler = new ExchangeSourceHandler( + randomExchangeBuffer(), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); + final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); + final int maxOutputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); + Set expectedSeqNos = ConcurrentCollections.newConcurrentSet(); + AtomicInteger failedRequests = new AtomicInteger(); + AtomicInteger totalSinks = new AtomicInteger(); + AtomicInteger failedSinks = new AtomicInteger(); + AtomicInteger completedSinks = new AtomicInteger(); + Supplier exchangeSink = () -> { + var sinkHandler = new ExchangeSinkHandler(blockFactory, randomExchangeBuffer(), threadPool.relativeTimeInMillisSupplier()); + int failAfter = randomBoolean() ? 
Integer.MAX_VALUE : randomIntBetween(0, 100); + AtomicInteger fetched = new AtomicInteger(); + int instance = randomIntBetween(1, 3); + totalSinks.incrementAndGet(); + AtomicBoolean sinkFailed = new AtomicBoolean(); + exchangeSourceHandler.addRemoteSink((allSourcesFinished, listener) -> { + if (fetched.incrementAndGet() > failAfter) { + sinkHandler.fetchPageAsync(true, listener.delegateFailure((l, r) -> { + failedRequests.incrementAndGet(); + sinkFailed.set(true); + listener.onFailure(new CircuitBreakingException("simulated", CircuitBreaker.Durability.PERMANENT)); + })); + } else { + sinkHandler.fetchPageAsync(allSourcesFinished, listener.delegateFailure((l, r) -> { + Page page = r.takePage(); + if (page != null) { + IntBlock block = page.getBlock(0); + for (int i = 0; i < block.getPositionCount(); i++) { + int v = block.getInt(i); + if (v < maxOutputSeqNo) { + expectedSeqNos.add(v); + } + } + } + l.onResponse(new ExchangeResponse(blockFactory, page, r.finished())); + })); + } + }, false, instance, ActionListener.wrap(r -> { + assertFalse(sinkFailed.get()); + completedSinks.incrementAndGet(); + }, e -> { + assertTrue(sinkFailed.get()); + failedSinks.incrementAndGet(); + })); + return sinkHandler.createExchangeSink(); + }; + Set actualSeqNos = runConcurrentTest( + maxInputSeqNo, + maxOutputSeqNo, + exchangeSourceHandler::createExchangeSource, + exchangeSink + ); + assertThat(actualSeqNos, equalTo(expectedSeqNos)); + assertThat(completedSinks.get() + failedSinks.get(), equalTo(totalSinks.get())); + sourceCompletionFuture.actionGet(); + if (failedRequests.get() > 0) { + assertThat(failedSinks.get(), greaterThan(0)); + } else { + assertThat(failedSinks.get(), equalTo(0)); + } + } + public void testEarlyTerminate() { BlockFactory blockFactory = blockFactory(); IntBlock block1 = blockFactory.newConstantIntBlockWith(1, 2); @@ -378,15 +453,31 @@ public void testConcurrentWithTransportActions() { try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - var sourceHandler = new ExchangeSourceHandler(randomExchangeBuffer(), threadPool.executor(ESQL_TEST_EXECUTOR)); PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); - sourceHandler.addCompletionListener(sourceCompletionFuture); + var sourceHandler = new ExchangeSourceHandler( + randomExchangeBuffer(), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomExchangeBuffer()); Transport.Connection connection = node0.getConnection(node1.getLocalNode()); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); + sourceHandler.addRemoteSink( + exchange0.newRemoteSink(task, exchangeId, node0, connection), + randomBoolean(), + randomIntBetween(1, 5), + ActionListener.noop() + ); final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); final int maxOutputSeqNo = rarely() ? 
-1 : randomIntBetween(0, 50_000); - runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink); + Set actualSeqNos = runConcurrentTest( + maxInputSeqNo, + maxOutputSeqNo, + sourceHandler::createExchangeSource, + sinkHandler::createExchangeSink + ); + var expectedSeqNos = IntStream.range(0, Math.min(maxInputSeqNo, maxOutputSeqNo)).boxed().collect(Collectors.toSet()); + assertThat(actualSeqNos, hasSize(expectedSeqNos.size())); + assertThat(actualSeqNos, equalTo(expectedSeqNos)); sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS); } } @@ -437,12 +528,20 @@ public void sendResponse(TransportResponse transportResponse) { try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - var sourceHandler = new ExchangeSourceHandler(randomIntBetween(1, 128), threadPool.executor(ESQL_TEST_EXECUTOR)); PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); - sourceHandler.addCompletionListener(sourceCompletionFuture); + var sourceHandler = new ExchangeSourceHandler( + randomIntBetween(1, 128), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomIntBetween(1, 128)); Transport.Connection connection = node0.getConnection(node1.getLocalNode()); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); + sourceHandler.addRemoteSink( + exchange0.newRemoteSink(task, exchangeId, node0, connection), + true, + randomIntBetween(1, 5), + ActionListener.noop() + ); Exception err = expectThrows( Exception.class, () -> runConcurrentTest(maxSeqNo, maxSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink) @@ -451,7 +550,7 @@ public void sendResponse(TransportResponse transportResponse) { assertNotNull(cause); assertThat(cause.getMessage(), equalTo("page is too large")); sinkHandler.onFailure(new RuntimeException(cause)); - sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS); + expectThrows(Exception.class, () -> sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java index 9f5b35e1eb9fb..d73aaee655860 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java @@ -73,8 +73,7 @@ public PhysicalPlan apply(PhysicalPlan plan) { Source.EMPTY, new Project(logicalFragment.source(), logicalFragment, output), fragmentExec.esFilter(), - fragmentExec.estimatedRowSize(), - fragmentExec.reducer() + fragmentExec.estimatedRowSize() ); return new ExchangeExec(exec.source(), output, exec.inBetweenAggs(), newChild); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java index 5b1ee14642dbe..444c111539033 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java @@ -31,7 +31,6 @@ public 
class FragmentExec extends LeafExec implements EstimatesRowSize { private final LogicalPlan fragment; private final QueryBuilder esFilter; - private final PhysicalPlan reducer; // datanode-level physical plan node that performs an intermediate (not partial) reduce /** * Estimate of the number of bytes that'll be loaded per position before @@ -40,25 +39,28 @@ public class FragmentExec extends LeafExec implements EstimatesRowSize { private final int estimatedRowSize; public FragmentExec(LogicalPlan fragment) { - this(fragment.source(), fragment, null, 0, null); + this(fragment.source(), fragment, null, 0); } - public FragmentExec(Source source, LogicalPlan fragment, QueryBuilder esFilter, int estimatedRowSize, PhysicalPlan reducer) { + public FragmentExec(Source source, LogicalPlan fragment, QueryBuilder esFilter, int estimatedRowSize) { super(source); this.fragment = fragment; this.esFilter = esFilter; this.estimatedRowSize = estimatedRowSize; - this.reducer = reducer; } private FragmentExec(StreamInput in) throws IOException { - this( - Source.readFrom((PlanStreamInput) in), - in.readNamedWriteable(LogicalPlan.class), - in.readOptionalNamedWriteable(QueryBuilder.class), - in.readOptionalVInt(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readOptionalNamedWriteable(PhysicalPlan.class) : null - ); + super(Source.readFrom((PlanStreamInput) in)); + this.fragment = in.readNamedWriteable(LogicalPlan.class); + this.esFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_REMOVE_NODE_LEVEL_PLAN)) { + this.estimatedRowSize = in.readVInt(); + } else { + this.estimatedRowSize = Objects.requireNonNull(in.readOptionalVInt()); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { + in.readOptionalNamedWriteable(PhysicalPlan.class); // for old reducer + } + } } @Override @@ -66,9 +68,13 @@ public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); out.writeNamedWriteable(fragment()); out.writeOptionalNamedWriteable(esFilter()); - out.writeOptionalVInt(estimatedRowSize()); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - out.writeOptionalNamedWriteable(reducer); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REMOVE_NODE_LEVEL_PLAN)) { + out.writeVInt(estimatedRowSize); + } else { + out.writeOptionalVInt(estimatedRowSize()); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { + out.writeOptionalNamedWriteable(null);// for old reducer + } } } @@ -89,13 +95,9 @@ public Integer estimatedRowSize() { return estimatedRowSize; } - public PhysicalPlan reducer() { - return reducer; - } - @Override protected NodeInfo info() { - return NodeInfo.create(this, FragmentExec::new, fragment, esFilter, estimatedRowSize, reducer); + return NodeInfo.create(this, FragmentExec::new, fragment, esFilter, estimatedRowSize); } @Override @@ -108,24 +110,20 @@ public PhysicalPlan estimateRowSize(State state) { int estimatedRowSize = state.consumeAllFields(false); return Objects.equals(estimatedRowSize, this.estimatedRowSize) ? this - : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + : new FragmentExec(source(), fragment, esFilter, estimatedRowSize); } public FragmentExec withFragment(LogicalPlan fragment) { - return Objects.equals(fragment, this.fragment) ? this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + return Objects.equals(fragment, this.fragment) ? 
this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize); } public FragmentExec withFilter(QueryBuilder filter) { - return Objects.equals(filter, this.esFilter) ? this : new FragmentExec(source(), fragment, filter, estimatedRowSize, reducer); - } - - public FragmentExec withReducer(PhysicalPlan reducer) { - return Objects.equals(reducer, this.reducer) ? this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + return Objects.equals(filter, this.esFilter) ? this : new FragmentExec(source(), fragment, filter, estimatedRowSize); } @Override public int hashCode() { - return Objects.hash(fragment, esFilter, estimatedRowSize, reducer); + return Objects.hash(fragment, esFilter, estimatedRowSize); } @Override @@ -141,8 +139,7 @@ public boolean equals(Object obj) { FragmentExec other = (FragmentExec) obj; return Objects.equals(fragment, other.fragment) && Objects.equals(esFilter, other.esFilter) - && Objects.equals(estimatedRowSize, other.estimatedRowSize) - && Objects.equals(reducer, other.reducer); + && Objects.equals(estimatedRowSize, other.estimatedRowSize); } @Override @@ -154,7 +151,6 @@ public String nodeString() { sb.append(", estimatedRowSize="); sb.append(estimatedRowSize); sb.append(", reducer=["); - sb.append(reducer == null ? "" : reducer.toString()); sb.append("], fragment=[<>\n"); sb.append(fragment.toString()); sb.append("<>]]"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index eeed811674f60..6a0d1bf9bb035 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -60,6 +60,7 @@ import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlSearchShardsAction; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; @@ -196,10 +197,6 @@ public void execute( .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - final var exchangeSource = new ExchangeSourceHandler( - queryPragmas.exchangeBufferSize(), - transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) - ); String local = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; /* * Grab the output attributes here, so we can pass them to @@ -208,46 +205,58 @@ public void execute( */ List outputAttributes = physicalPlan.output(); try ( - Releasable ignored = exchangeSource.addEmptySink(); // this is the top level ComputeListener called once at the end (e.g., once all clusters have finished for a CCS) var computeListener = ComputeListener.create(local, transportService, rootTask, execInfo, listener.map(r -> { execInfo.markEndQuery(); // TODO: revisit this time recording model as part of INLINESTATS improvements return new Result(outputAttributes, collectedPages, r.getProfiles(), execInfo); })) ) { - // run compute on the coordinator - 
exchangeSource.addCompletionListener(computeListener.acquireAvoid()); - runCompute( - rootTask, - new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), - coordinatorPlan, - computeListener.acquireCompute(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) + var exchangeSource = new ExchangeSourceHandler( + queryPragmas.exchangeBufferSize(), + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH), + computeListener.acquireAvoid() ); - // starts computes on data nodes on the main cluster - if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { - startComputeOnDataNodes( + try (Releasable ignored = exchangeSource.addEmptySink()) { + // run compute on the coordinator + runCompute( + rootTask, + new ComputeContext( + sessionId, + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + List.of(), + configuration, + exchangeSource, + null + ), + coordinatorPlan, + computeListener.acquireCompute(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) + ); + // starts computes on data nodes on the main cluster + if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { + startComputeOnDataNodes( + sessionId, + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + rootTask, + configuration, + dataNodePlan, + Set.of(localConcreteIndices.indices()), + localOriginalIndices, + exchangeSource, + execInfo, + computeListener + ); + } + // starts computes on remote clusters + startComputeOnRemoteClusters( sessionId, - RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, rootTask, configuration, dataNodePlan, - Set.of(localConcreteIndices.indices()), - localOriginalIndices, exchangeSource, - execInfo, + getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), computeListener ); } - // starts computes on remote clusters - startComputeOnRemoteClusters( - sessionId, - rootTask, - configuration, - dataNodePlan, - exchangeSource, - getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), - computeListener - ); } } @@ -306,14 +315,7 @@ private void startComputeOnDataNodes( EsqlExecutionInfo executionInfo, ComputeListener computeListener ) { - var planWithReducer = configuration.pragmas().nodeLevelReduction() == false - ? dataNodePlan - : dataNodePlan.transformUp(FragmentExec.class, f -> { - PhysicalPlan reductionNode = PlannerUtils.dataNodeReductionPlan(f.fragment(), dataNodePlan); - return reductionNode == null ? 
f : f.withReducer(reductionNode); - }); - - QueryBuilder requestFilter = PlannerUtils.requestTimestampFilter(planWithReducer); + QueryBuilder requestFilter = PlannerUtils.requestTimestampFilter(dataNodePlan); var lookupListener = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); // SearchShards API can_match is done in lookupDataNodes lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodeResult -> { @@ -341,7 +343,7 @@ private void startComputeOnDataNodes( esqlExecutor, refs.acquire().delegateFailureAndWrap((l, unused) -> { var remoteSink = exchangeService.newRemoteSink(parentTask, childSessionId, transportService, node.connection); - exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + exchangeSource.addRemoteSink(remoteSink, true, queryPragmas.concurrentExchangeClients(), ActionListener.noop()); ActionListener computeResponseListener = computeListener.acquireCompute(clusterAlias); var dataNodeListener = ActionListener.runBefore(computeResponseListener, () -> l.onResponse(null)); transportService.sendChildRequest( @@ -353,7 +355,7 @@ private void startComputeOnDataNodes( clusterAlias, node.shardIds, node.aliasFilters, - planWithReducer, + dataNodePlan, originalIndices.indices(), originalIndices.indicesOptions() ), @@ -390,7 +392,7 @@ private void startComputeOnRemoteClusters( esqlExecutor, refs.acquire().delegateFailureAndWrap((l, unused) -> { var remoteSink = exchangeService.newRemoteSink(rootTask, childSessionId, transportService, cluster.connection); - exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + exchangeSource.addRemoteSink(remoteSink, true, queryPragmas.concurrentExchangeClients(), ActionListener.noop()); var remotePlan = new RemoteClusterPlan(plan, cluster.concreteIndices, cluster.originalIndices); var clusterRequest = new ClusterComputeRequest(cluster.clusterAlias, childSessionId, configuration, remotePlan); var clusterListener = ActionListener.runBefore( @@ -442,12 +444,12 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, ); LOGGER.debug("Received physical plan:\n{}", plan); + plan = PlannerUtils.localPlan(context.searchExecutionContexts(), context.configuration, plan); // the planner will also set the driver parallelism in LocalExecutionPlanner.LocalExecutionPlan (used down below) // it's doing this in the planning of EsQueryExec (the source of the data) // see also EsPhysicalOperationProviders.sourcePhysicalOperation LocalExecutionPlanner.LocalExecutionPlan localExecutionPlan = planner.plan(plan); - if (LOGGER.isDebugEnabled()) { LOGGER.debug("Local execution plan:\n{}", localExecutionPlan.describe()); } @@ -733,9 +735,8 @@ private void runComputeOnDataNode( // run the node-level reduction var externalSink = exchangeService.getSinkHandler(externalId); task.addListener(() -> exchangeService.finishSinkHandler(externalId, new TaskCancelledException(task.getReasonCancelled()))); - var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor); - exchangeSource.addCompletionListener(computeListener.acquireAvoid()); - exchangeSource.addRemoteSink(internalSink::fetchPageAsync, 1); + var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor, computeListener.acquireAvoid()); + exchangeSource.addRemoteSink(internalSink::fetchPageAsync, true, 1, ActionListener.noop()); ActionListener reductionListener = computeListener.acquireCompute(); runCompute( task, @@ -778,14 +779,23 
@@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T listener.onFailure(new IllegalStateException("expected a fragment plan for a remote compute; got " + request.plan())); return; } - var localExchangeSource = new ExchangeSourceExec(plan.source(), plan.output(), plan.isIntermediateAgg()); - FragmentExec fragment = (FragmentExec) fragments.get(0); + Holder reducePlanHolder = new Holder<>(); + if (request.pragmas().nodeLevelReduction()) { + PhysicalPlan dataNodePlan = request.plan(); + request.plan() + .forEachUp( + FragmentExec.class, + f -> { reducePlanHolder.set(PlannerUtils.dataNodeReductionPlan(f.fragment(), dataNodePlan)); } + ); + } reducePlan = new ExchangeSinkExec( plan.source(), plan.output(), plan.isIntermediateAgg(), - fragment.reducer() != null ? fragment.reducer().replaceChildren(List.of(localExchangeSource)) : localExchangeSource + reducePlanHolder.get() != null + ? reducePlanHolder.get().replaceChildren(List.of(localExchangeSource)) + : localExchangeSource ); } else { listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); @@ -872,11 +882,11 @@ void runComputeOnRemoteCluster( final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), - transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH), + computeListener.acquireAvoid() ); try (Releasable ignored = exchangeSource.addEmptySink()) { exchangeSink.addCompletionListener(computeListener.acquireAvoid()); - exchangeSource.addCompletionListener(computeListener.acquireAvoid()); PhysicalPlan coordinatorPlan = new ExchangeSinkExec( plan.source(), plan.output(), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 010a60ef7da15..c745801bf505f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -539,7 +539,7 @@ void executeSubPlan( bigArrays, ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) ); - ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); + ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor, ActionListener.noop()); ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( @@ -569,7 +569,14 @@ void executeSubPlan( var physicalTestOptimizer = new TestLocalPhysicalPlanOptimizer(new LocalPhysicalOptimizerContext(configuration, searchStats)); var csvDataNodePhysicalPlan = PlannerUtils.localPlan(dataNodePlan, logicalTestOptimizer, physicalTestOptimizer); - exchangeSource.addRemoteSink(exchangeSink::fetchPageAsync, randomIntBetween(1, 3)); + exchangeSource.addRemoteSink( + exchangeSink::fetchPageAsync, + Randomness.get().nextBoolean(), + randomIntBetween(1, 3), + ActionListener.noop().delegateResponse((l, e) -> { + throw new AssertionError("expected no failure", e); + }) + ); LocalExecutionPlan dataNodeExecutionPlan = executionPlanner.plan(csvDataNodePhysicalPlan); drivers.addAll(dataNodeExecutionPlan.createDrivers(getTestName())); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java index 5989c0de6b61d..f8e12cd4f5ba9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -66,12 +66,13 @@ protected boolean alwaysEmptySource() { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(1424048)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(1424046L)); /* * History: * 2.3mb - shorten error messages for UnsupportedAttributes #111973 * 1.8mb - cache EsFields #112008 * 1.4mb - string serialization #112929 + * 1424046b - remove node-level plan #117422 */ } @@ -80,7 +81,7 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(2774192)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(2774190)); /* * History: * 2 gb+ - start @@ -89,6 +90,7 @@ public void testManyTypeConflictsWithParent() throws IOException { * 3.1mb - cache EsFields #112008 * 2774214b - string serialization #112929 * 2774192b - remove field attribute #112881 + * 2774190b - remove node-level plan #117422 */ } @@ -103,11 +105,12 @@ private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) t * with a single root field that has many children, grandchildren etc. */ public void testDeeplyNestedFields() throws IOException { - ByteSizeValue expected = ByteSizeValue.ofBytes(47252411); + ByteSizeValue expected = ByteSizeValue.ofBytes(47252409); /* * History: * 48223371b - string serialization #112929 * 47252411b - remove field attribute #112881 + * 47252409b - remove node-level plan #117422 */ int depth = 6; @@ -123,11 +126,12 @@ public void testDeeplyNestedFields() throws IOException { * with a single root field that has many children, grandchildren etc.
*/ public void testDeeplyNestedFieldsKeepOnlyOne() throws IOException { - ByteSizeValue expected = ByteSizeValue.ofBytes(9425806); + ByteSizeValue expected = ByteSizeValue.ofBytes(9425804); /* * History: * 9426058b - string serialization #112929 * 9425806b - remove field attribute #112881 + * 9425804b - remove node-level plan #117422 */ int depth = 6; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java index 3c70290360a56..b36c42a1a06ab 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java @@ -22,8 +22,7 @@ public static FragmentExec randomFragmentExec(int depth) { LogicalPlan fragment = AbstractLogicalPlanSerializationTests.randomChild(depth); QueryBuilder esFilter = EsqlQueryRequestTests.randomQueryBuilder(); int estimatedRowSize = between(0, Integer.MAX_VALUE); - PhysicalPlan reducer = randomChild(depth); - return new FragmentExec(source, fragment, esFilter, estimatedRowSize, reducer); + return new FragmentExec(source, fragment, esFilter, estimatedRowSize); } @Override @@ -36,15 +35,13 @@ protected FragmentExec mutateInstance(FragmentExec instance) throws IOException LogicalPlan fragment = instance.fragment(); QueryBuilder esFilter = instance.esFilter(); int estimatedRowSize = instance.estimatedRowSize(); - PhysicalPlan reducer = instance.reducer(); - switch (between(0, 3)) { + switch (between(0, 2)) { case 0 -> fragment = randomValueOtherThan(fragment, () -> AbstractLogicalPlanSerializationTests.randomChild(0)); case 1 -> esFilter = randomValueOtherThan(esFilter, EsqlQueryRequestTests::randomQueryBuilder); case 2 -> estimatedRowSize = randomValueOtherThan(estimatedRowSize, () -> between(0, Integer.MAX_VALUE)); - case 3 -> reducer = randomValueOtherThan(reducer, () -> randomChild(0)); default -> throw new UnsupportedEncodingException(); } - return new FragmentExec(instance.source(), fragment, esFilter, estimatedRowSize, reducer); + return new FragmentExec(instance.source(), fragment, esFilter, estimatedRowSize); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java index 8d819f9dbcd6c..55f32d07fc2cb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java @@ -305,7 +305,7 @@ private PhysicalPlan plan(String query, QueryBuilder restFilter) { // System.out.println("physical\n" + physical); physical = physical.transformUp( FragmentExec.class, - f -> new FragmentExec(f.source(), f.fragment(), restFilter, f.estimatedRowSize(), f.reducer()) + f -> new FragmentExec(f.source(), f.fragment(), restFilter, f.estimatedRowSize()) ); physical = physicalPlanOptimizer.optimize(physical); // System.out.println("optimized\n" + physical); diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 04d12fd51bae7..904b00e6d0450 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ 
b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -13,6 +13,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.core.XPackPlugin; @@ -46,7 +48,8 @@ public LogsDBPlugin(Settings settings) { @Override public Collection<?> createComponents(PluginServices services) { - licenseService.setLicenseState(XPackPlugin.getSharedLicenseState()); + licenseService.setLicenseService(getLicenseService()); + licenseService.setLicenseState(getLicenseState()); var clusterSettings = services.clusterService().getClusterSettings(); // The `cluster.logsdb.enabled` setting is registered by this plugin, but its value may be updated by other plugins // before this plugin registers its settings update consumer below. This means we might miss updates that occurred earlier. @@ -88,4 +91,12 @@ public List<Setting<?>> getSettings() { actions.add(new ActionPlugin.ActionHandler<>(XPackInfoFeatureAction.LOGSDB, LogsDBInfoTransportAction.class)); return actions; } + + protected XPackLicenseState getLicenseState() { + return XPackPlugin.getSharedLicenseState(); + } + + protected LicenseService getLicenseService() { + return XPackPlugin.getSharedLicenseService(); + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java index 1f38ecda19515..462bad4b19551 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java @@ -81,8 +81,13 @@ public Settings getAdditionalIndexSettings( // This index name is used when validating component and index templates, we should skip this check in that case. // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) method) boolean isTemplateValidation = "validate-index-name".equals(indexName); + boolean legacyLicensedUsageOfSyntheticSourceAllowed = isLegacyLicensedUsageOfSyntheticSourceAllowed( + templateIndexMode, + indexName, + dataStreamName + ); if (newIndexHasSyntheticSourceUsage(indexName, templateIndexMode, indexTemplateAndCreateRequestSettings, combinedTemplateMappings) - && syntheticSourceLicenseService.fallbackToStoredSource(isTemplateValidation)) { + && syntheticSourceLicenseService.fallbackToStoredSource(isTemplateValidation, legacyLicensedUsageOfSyntheticSourceAllowed)) { LOGGER.debug("creation of index [{}] with synthetic source without it being allowed", indexName); return Settings.builder() .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString()) @@ -167,4 +172,29 @@ private IndexMetadata buildIndexMetadataForMapperService( tmpIndexMetadata.settings(finalResolvedSettings); return tmpIndexMetadata.build(); } + + /** + * The GA-ed use cases in which synthetic source usage is allowed with a gold or platinum license.
+ */ + boolean isLegacyLicensedUsageOfSyntheticSourceAllowed(IndexMode templateIndexMode, String indexName, String dataStreamName) { + if (templateIndexMode == IndexMode.TIME_SERIES) { + return true; + } + + // To allow the following patterns: profiling-metrics and profiling-events + if (dataStreamName != null && dataStreamName.startsWith("profiling-")) { + return true; + } + // To allow the following patterns: .profiling-sq-executables, .profiling-sq-leafframes and .profiling-stacktraces + if (indexName.startsWith(".profiling-")) { + return true; + } + // To allow the following patterns: metrics-apm.transaction.*, metrics-apm.service_transaction.*, metrics-apm.service_summary.*, + // metrics-apm.service_destination.*, metrics-apm.internal-* and metrics-apm.app.* + if (dataStreamName != null && dataStreamName.startsWith("metrics-apm.")) { + return true; + } + + return false; + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java index 55d4bfe05abe3..1b3513f15a86a 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java @@ -7,18 +7,30 @@ package org.elasticsearch.xpack.logsdb; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.LicensedFeature; import org.elasticsearch.license.XPackLicenseState; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; + /** * Determines, based on the license and the fallback setting, whether synthetic source usage should fall back to stored source. */ final class SyntheticSourceLicenseService { - private static final String MAPPINGS_FEATURE_FAMILY = "mappings"; + static final String MAPPINGS_FEATURE_FAMILY = "mappings"; + // You can only override this property if you received explicit approval from Elastic. + private static final String CUTOFF_DATE_SYS_PROP_NAME = + "es.mapping.synthetic_source_fallback_to_stored_source.cutoff_date_restricted_override"; + private static final Logger LOGGER = LogManager.getLogger(SyntheticSourceLicenseService.class); + static final long DEFAULT_CUTOFF_DATE = LocalDateTime.of(2024, 12, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); /** * A setting that determines whether source mode should always be stored source, regardless of license.
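The next hunk reworks fallbackToStoredSource(...) around these constants: an enterprise license keeps synthetic source outright, while gold and platinum licenses are honoured only for the allow-listed legacy use cases above and only when the license started on or before the cutoff date. For orientation, a condensed sketch of that decision order; the boolean parameters are hypothetical stand-ins for the XPackLicenseState and LicenseService lookups, not the production API:

// Illustrative sketch of the decision order; not the production method.
final class FallbackDecisionSketch {
    static boolean fallbackToStoredSource(
        boolean syntheticSourceFallback,  // the cluster-wide fallback setting
        boolean enterpriseFeatureAllowed, // SYNTHETIC_SOURCE_FEATURE check (ENTERPRISE)
        boolean legacyFeatureAllowed,     // SYNTHETIC_SOURCE_FEATURE_LEGACY check (GOLD)
        boolean legacyUsageAllowed,       // index matches a GA-ed legacy pattern
        long licenseStartDate,            // License#startDate()
        long cutoffDate                   // DEFAULT_CUTOFF_DATE unless overridden
    ) {
        if (syntheticSourceFallback) {
            return true;  // the explicit fallback setting always wins
        }
        if (enterpriseFeatureAllowed) {
            return false; // enterprise license: keep synthetic source
        }
        // Legacy gold/platinum path; the real method also falls back here when no license is available.
        if (legacyUsageAllowed && licenseStartDate <= cutoffDate && legacyFeatureAllowed) {
            return false;
        }
        return true;      // otherwise fall back to stored source
    }
}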
@@ -30,31 +42,71 @@ final class SyntheticSourceLicenseService { Setting.Property.Dynamic ); - private static final LicensedFeature.Momentary SYNTHETIC_SOURCE_FEATURE = LicensedFeature.momentary( + static final LicensedFeature.Momentary SYNTHETIC_SOURCE_FEATURE = LicensedFeature.momentary( MAPPINGS_FEATURE_FAMILY, "synthetic-source", License.OperationMode.ENTERPRISE ); + static final LicensedFeature.Momentary SYNTHETIC_SOURCE_FEATURE_LEGACY = LicensedFeature.momentary( + MAPPINGS_FEATURE_FAMILY, + "synthetic-source-legacy", + License.OperationMode.GOLD + ); + + private final long cutoffDate; + private LicenseService licenseService; private XPackLicenseState licenseState; private volatile boolean syntheticSourceFallback; SyntheticSourceLicenseService(Settings settings) { - syntheticSourceFallback = FALLBACK_SETTING.get(settings); + this(settings, System.getProperty(CUTOFF_DATE_SYS_PROP_NAME)); + } + + SyntheticSourceLicenseService(Settings settings, String cutoffDate) { + this.syntheticSourceFallback = FALLBACK_SETTING.get(settings); + this.cutoffDate = getCutoffDate(cutoffDate); } /** * @return whether synthetic source mode should fall back to stored source. */ - public boolean fallbackToStoredSource(boolean isTemplateValidation) { + public boolean fallbackToStoredSource(boolean isTemplateValidation, boolean legacyLicensedUsageOfSyntheticSourceAllowed) { if (syntheticSourceFallback) { return true; } + var licenseStateSnapshot = licenseState.copyCurrentLicenseState(); + if (checkFeature(SYNTHETIC_SOURCE_FEATURE, licenseStateSnapshot, isTemplateValidation)) { + return false; + } + + var license = licenseService.getLicense(); + if (license == null) { + return true; + } + + boolean beforeCutoffDate = license.startDate() <= cutoffDate; + if (legacyLicensedUsageOfSyntheticSourceAllowed + && beforeCutoffDate + && checkFeature(SYNTHETIC_SOURCE_FEATURE_LEGACY, licenseStateSnapshot, isTemplateValidation)) { + // a platinum license also allows synthetic source via the gold legacy licensed feature.
+ LOGGER.debug("legacy license [{}] is allowed to use synthetic source", licenseStateSnapshot.getOperationMode().description()); + return false; + } + + return true; + } + + private static boolean checkFeature( + LicensedFeature.Momentary licensedFeature, + XPackLicenseState licenseStateSnapshot, + boolean isTemplateValidation + ) { if (isTemplateValidation) { - return SYNTHETIC_SOURCE_FEATURE.checkWithoutTracking(licenseState) == false; + return licensedFeature.checkWithoutTracking(licenseStateSnapshot); } else { - return SYNTHETIC_SOURCE_FEATURE.check(licenseState) == false; + return licensedFeature.check(licenseStateSnapshot); } } @@ -62,7 +114,26 @@ void setSyntheticSourceFallback(boolean syntheticSourceFallback) { this.syntheticSourceFallback = syntheticSourceFallback; } + void setLicenseService(LicenseService licenseService) { + this.licenseService = licenseService; + } + void setLicenseState(XPackLicenseState licenseState) { this.licenseState = licenseState; } + + private static long getCutoffDate(String cutoffDateAsString) { + if (cutoffDateAsString != null) { + long cutoffDate = LocalDateTime.parse(cutoffDateAsString).toInstant(ZoneOffset.UTC).toEpochMilli(); + LOGGER.warn("Configuring [{}] is only allowed with explicit approval from Elastic.", CUTOFF_DATE_SYS_PROP_NAME); + LOGGER.info( + "Configuring [{}] to [{}]", + CUTOFF_DATE_SYS_PROP_NAME, + LocalDateTime.ofInstant(Instant.ofEpochSecond(cutoffDate), ZoneOffset.UTC) + ); + return cutoffDate; + } else { + return DEFAULT_CUTOFF_DATE; + } + } } diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java new file mode 100644 index 0000000000000..890bc464a2579 --- /dev/null +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.license.AbstractLicensesIntegrationTestCase; +import org.elasticsearch.license.GetFeatureUsageRequest; +import org.elasticsearch.license.GetFeatureUsageResponse; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.TransportGetFeatureUsageAction; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.hamcrest.Matcher; +import org.junit.Before; + +import java.nio.file.Path; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createGoldOrPlatinumLicense; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +@ESIntegTestCase.ClusterScope(scope = TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +public class LegacyLicenceIntegrationTests extends AbstractLicensesIntegrationTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(P.class); + } + + @Before + public void setup() throws Exception { + wipeAllLicenses(); + ensureGreen(); + License license = createGoldOrPlatinumLicense(); + putLicense(license); + ensureGreen(); + } + + public void testSyntheticSourceUsageDisallowed() { + createIndexWithSyntheticSourceAndAssertExpectedType("test", "STORED"); + + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, nullValue()); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, nullValue()); + } + + public void testSyntheticSourceUsageWithLegacyLicense() { + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-stacktraces", "synthetic"); + + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, not(nullValue())); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, nullValue()); + } + + public void testSyntheticSourceUsageWithLegacyLicensePastCutoff() throws Exception { + long startPastCutoff = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + putLicense(createGoldOrPlatinumLicense(startPastCutoff)); + ensureGreen(); + + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-stacktraces", "STORED"); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, nullValue()); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, nullValue()); + } + + public void testSyntheticSourceUsageWithEnterpriseLicensePastCutoff() throws Exception { + long startPastCutoff = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + putLicense(createEnterpriseLicense(startPastCutoff)); + ensureGreen(); + + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-traces", "synthetic"); + //
also supports non-exceptional indices + createIndexWithSyntheticSourceAndAssertExpectedType("test", "synthetic"); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, nullValue()); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, not(nullValue())); + } + + public void testSyntheticSourceUsageTracksBothLegacyAndRegularFeature() throws Exception { + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-traces", "synthetic"); + + putLicense(createEnterpriseLicense()); + ensureGreen(); + + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-traces-v2", "synthetic"); + + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, not(nullValue())); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, not(nullValue())); + } + + private void createIndexWithSyntheticSourceAndAssertExpectedType(String indexName, String expectedType) { + var settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "synthetic").build(); + createIndex(indexName, settings); + var response = admin().indices().getSettings(new GetSettingsRequest().indices(indexName)).actionGet(); + assertThat( + response.getIndexToSettings().get(indexName).get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey()), + equalTo(expectedType) + ); + } + + private List<GetFeatureUsageResponse.FeatureUsageInfo> getFeatureUsageInfo() { + return client().execute(TransportGetFeatureUsageAction.TYPE, new GetFeatureUsageRequest()).actionGet().getFeatures(); + } + + private void assertFeatureUsage(LicensedFeature.Momentary syntheticSourceFeature, Matcher<Object> matcher) { + GetFeatureUsageResponse.FeatureUsageInfo featureUsage = getFeatureUsageInfo().stream() + .filter(f -> f.getFamily().equals(SyntheticSourceLicenseService.MAPPINGS_FEATURE_FAMILY)) + .filter(f -> f.getName().equals(syntheticSourceFeature.getName())) + .findAny() + .orElse(null); + assertThat(featureUsage, matcher); + } + + public static class P extends LocalStateCompositeXPackPlugin { + + public P(final Settings settings, final Path configPath) { + super(settings, configPath); + plugins.add(new LogsDBPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return P.this.getLicenseState(); + } + + @Override + protected LicenseService getLicenseService() { + return P.this.getLicenseService(); + } + }); + } + + } +} diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java new file mode 100644 index 0000000000000..939d7d892a48d --- /dev/null +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.List; + +import static org.elasticsearch.xpack.logsdb.SyntheticSourceIndexSettingsProviderTests.getLogsdbIndexModeSettingsProvider; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createGoldOrPlatinumLicense; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SyntheticSourceIndexSettingsProviderLegacyLicenseTests extends ESTestCase { + + private SyntheticSourceIndexSettingsProvider provider; + + @Before + public void setup() throws Exception { + long time = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(); + var licenseState = new XPackLicenseState(() -> time, new XPackLicenseStatus(license.operationMode(), true, null)); + + var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + SyntheticSourceLicenseService syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); + + provider = new SyntheticSourceIndexSettingsProvider( + syntheticSourceLicenseService, + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(false), + IndexVersion::current + ); + } + + public void testGetAdditionalIndexSettingsDefault() { + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-my-app"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(1)); + assertThat(result.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey()), equalTo("STORED")); + } + + public void testGetAdditionalIndexSettingsApm() throws IOException { + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-apm.app.test"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + + public void testGetAdditionalIndexSettingsProfiling() throws IOException { + Settings settings = 
Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + for (String dataStreamName : new String[] { "profiling-metrics", "profiling-events" }) { + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + + for (String indexName : new String[] { ".profiling-sq-executables", ".profiling-sq-leafframes", ".profiling-stacktraces" }) { + var result = provider.getAdditionalIndexSettings(indexName, null, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + } + + public void testGetAdditionalIndexSettingsTsdb() throws IOException { + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-my-app"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, IndexMode.TIME_SERIES, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + + public void testGetAdditionalIndexSettingsTsdbAfterCutoffDate() throws Exception { + long start = LocalDateTime.of(2024, 12, 20, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(start); + long time = LocalDateTime.of(2024, 12, 31, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + var licenseState = new XPackLicenseState(() -> time, new XPackLicenseStatus(license.operationMode(), true, null)); + + var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + SyntheticSourceLicenseService syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); + + provider = new SyntheticSourceIndexSettingsProvider( + syntheticSourceLicenseService, + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(false), + IndexVersion::current + ); + + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-my-app"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, IndexMode.TIME_SERIES, null, null, settings, List.of()); + assertThat(result.size(), equalTo(1)); + assertThat(result.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey()), equalTo("STORED")); + } +} diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java index d6cdb9f761b31..df1fb8f2d958c 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java @@ -18,6 +18,8 @@ import 
org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -28,6 +30,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.builder; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -39,18 +42,22 @@ public class SyntheticSourceIndexSettingsProviderTests extends ESTestCase { private SyntheticSourceIndexSettingsProvider provider; private final AtomicInteger newMapperServiceCounter = new AtomicInteger(); - private static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { + static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { return new LogsdbIndexModeSettingsProvider(Settings.builder().put("cluster.logsdb.enabled", enabled).build()); } @Before - public void setup() { - MockLicenseState licenseState = mock(MockLicenseState.class); + public void setup() throws Exception { + MockLicenseState licenseState = MockLicenseState.createMock(); when(licenseState.isAllowed(any())).thenReturn(true); var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + License license = createEnterpriseLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); provider = new SyntheticSourceIndexSettingsProvider(syntheticSourceLicenseService, im -> { newMapperServiceCounter.incrementAndGet(); diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java index 430ee75eb3561..90a13b16c028e 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java @@ -8,54 +8,195 @@ package org.elasticsearch.xpack.logsdb; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.MockLicenseState; +import org.elasticsearch.license.TestUtils; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import org.mockito.Mockito; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.UUID; + +import static org.elasticsearch.license.TestUtils.dateMath; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class SyntheticSourceLicenseServiceTests extends ESTestCase { + private LicenseService mockLicenseService; + private SyntheticSourceLicenseService licenseService; + + @Before + 
public void setup() throws Exception { + mockLicenseService = mock(LicenseService.class); + License license = createEnterpriseLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + } + public void testLicenseAllowsSyntheticSource() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(true); licenseService.setLicenseState(licenseState); - assertFalse("synthetic source is allowed, so not fallback to stored source", licenseService.fallbackToStoredSource(false)); + licenseService.setLicenseService(mockLicenseService); + assertFalse( + "synthetic source is allowed, so not fallback to stored source", + licenseService.fallbackToStoredSource(false, randomBoolean()) + ); Mockito.verify(licenseState, Mockito.times(1)).featureUsed(any()); } public void testLicenseAllowsSyntheticSourceTemplateValidation() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(true); licenseService.setLicenseState(licenseState); - assertFalse("synthetic source is allowed, so not fallback to stored source", licenseService.fallbackToStoredSource(true)); + licenseService.setLicenseService(mockLicenseService); + assertFalse( + "synthetic source is allowed, so not fallback to stored source", + licenseService.fallbackToStoredSource(true, randomBoolean()) + ); Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); } public void testDefaultDisallow() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(false); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(false); licenseService.setLicenseState(licenseState); - assertTrue("synthetic source is not allowed, so fallback to stored source", licenseService.fallbackToStoredSource(false)); + licenseService.setLicenseService(mockLicenseService); + assertTrue( + "synthetic source is not allowed, so fallback to stored source", + licenseService.fallbackToStoredSource(false, randomBoolean()) + ); Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); } public void testFallback() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(true); licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); licenseService.setSyntheticSourceFallback(true); assertTrue( "synthetic source is allowed, but fallback has been enabled, so fallback to stored source", - 
licenseService.fallbackToStoredSource(false) + licenseService.fallbackToStoredSource(false, randomBoolean()) ); Mockito.verifyNoInteractions(licenseState); + Mockito.verifyNoInteractions(mockLicenseService); + } + + public void testGoldOrPlatinumLicense() throws Exception { + mockLicenseService = mock(LicenseService.class); + License license = createGoldOrPlatinumLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY))).thenReturn(true); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertFalse( + "legacy licensed usage is allowed, so not fallback to stored source", + licenseService.fallbackToStoredSource(false, true) + ); + Mockito.verify(licenseState, Mockito.times(1)).featureUsed(any()); } + public void testGoldOrPlatinumLicenseLegacyLicenseNotAllowed() throws Exception { + mockLicenseService = mock(LicenseService.class); + License license = createGoldOrPlatinumLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(false); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertTrue( + "legacy licensed usage is not allowed, so fallback to stored source", + licenseService.fallbackToStoredSource(false, false) + ); + Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); + Mockito.verify(licenseState, Mockito.times(1)).isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE)); + } + + public void testGoldOrPlatinumLicenseBeyondCutoffDate() throws Exception { + long start = LocalDateTime.of(2025, 1, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(start); + mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(false); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertTrue("beyond cutoff date, so fallback to stored source", licenseService.fallbackToStoredSource(false, true)); + Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); + Mockito.verify(licenseState, Mockito.times(1)).isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE)); + } + + public void testGoldOrPlatinumLicenseCustomCutoffDate() throws Exception { + licenseService = new SyntheticSourceLicenseService(Settings.EMPTY, "2025-01-02T00:00"); + + long start = LocalDateTime.of(2025, 1, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(start); + mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + 
when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY))).thenReturn(true); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertFalse("before custom cutoff date, so not fallback to stored source", licenseService.fallbackToStoredSource(false, true)); + Mockito.verify(licenseState, Mockito.times(1)).featureUsed(any()); + Mockito.verify(licenseState, Mockito.times(1)).isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY)); + } + + static License createEnterpriseLicense() throws Exception { + long start = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + return createEnterpriseLicense(start); + } + + static License createEnterpriseLicense(long start) throws Exception { + String uid = UUID.randomUUID().toString(); + long currentTime = System.currentTimeMillis(); + final License.Builder builder = License.builder() + .uid(uid) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+2d", currentTime)) + .startDate(start) + .issueDate(currentTime) + .type("enterprise") + .issuedTo("customer") + .issuer("elasticsearch") + .maxResourceUnits(10); + return TestUtils.generateSignedLicense(builder); + } + + static License createGoldOrPlatinumLicense() throws Exception { + long start = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + return createGoldOrPlatinumLicense(start); + } + + static License createGoldOrPlatinumLicense(long start) throws Exception { + String uid = UUID.randomUUID().toString(); + long currentTime = System.currentTimeMillis(); + final License.Builder builder = License.builder() + .uid(uid) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+100d", currentTime)) + .startDate(start) + .issueDate(currentTime) + .type(randomBoolean() ?
"gold" : "platinum") + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + return TestUtils.generateSignedLicense(builder); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java index 95b6a18182f9b..efc041e2225a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java @@ -86,7 +86,7 @@ public int compareTo(BucketKey o) { } } - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket + public static class Bucket extends InternalMultiBucketAggregation.InternalBucketWritable implements MultiBucketsAggregation.Bucket, Comparable { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java index 39bdb69d4da40..aed0c40043cae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java @@ -18,7 +18,7 @@ import java.io.IOException; import java.util.Objects; -public class ChangePointBucket extends InternalMultiBucketAggregation.InternalBucket implements ToXContent { +public class ChangePointBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements ToXContent { private final Object key; private final long docCount; private final InternalAggregations aggregations; diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java index 3049fe830e728..989e5468c4fb3 100644 --- a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java @@ -44,7 +44,14 @@ public class S3SearchableSnapshotsCredentialsReloadIT extends ESRestTestCase { private static final String BUCKET = "S3SearchableSnapshotsCredentialsReloadIT-bucket"; private static final String BASE_PATH = "S3SearchableSnapshotsCredentialsReloadIT-base-path"; - public static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, "ignored"); + private static volatile String repositoryAccessKey; + + public static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + BUCKET, + BASE_PATH, + S3HttpFixture.mutableAccessKey(() -> repositoryAccessKey) + ); private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); @@ -78,7 +85,7 @@ public void testReloadCredentialsFromKeystore() throws IOException { // Set up initial credentials final String accessKey1 = randomIdentifier(); - s3Fixture.setAccessKey(accessKey1); + repositoryAccessKey = accessKey1; keystoreSettings.put("s3.client.default.access_key", accessKey1); 
keystoreSettings.put("s3.client.default.secret_key", randomIdentifier()); cluster.updateStoredSecureSettings(); @@ -92,7 +99,7 @@ public void testReloadCredentialsFromKeystore() throws IOException { // Rotate credentials in blob store logger.info("--> rotate credentials"); final String accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); - s3Fixture.setAccessKey(accessKey2); + repositoryAccessKey = accessKey2; // Ensure searchable snapshot now does not work due to invalid credentials logger.info("--> expect failure"); @@ -118,7 +125,7 @@ public void testReloadCredentialsFromAlternativeClient() throws IOException { final String accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); final String alternativeClient = randomValueOtherThan("default", ESTestCase::randomIdentifier); - s3Fixture.setAccessKey(accessKey1); + repositoryAccessKey = accessKey1; keystoreSettings.put("s3.client.default.access_key", accessKey1); keystoreSettings.put("s3.client.default.secret_key", randomIdentifier()); keystoreSettings.put("s3.client." + alternativeClient + ".access_key", accessKey2); @@ -133,7 +140,7 @@ public void testReloadCredentialsFromAlternativeClient() throws IOException { // Rotate credentials in blob store logger.info("--> rotate credentials"); - s3Fixture.setAccessKey(accessKey2); + repositoryAccessKey = accessKey2; // Ensure searchable snapshot now does not work due to invalid credentials logger.info("--> expect failure"); @@ -157,7 +164,7 @@ public void testReloadCredentialsFromMetadata() throws IOException { final String accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); testHarness.putRepository(b -> b.put("access_key", accessKey1).put("secret_key", randomIdentifier())); - s3Fixture.setAccessKey(accessKey1); + repositoryAccessKey = accessKey1; testHarness.createFrozenSearchableSnapshotIndex(); @@ -166,7 +173,7 @@ public void testReloadCredentialsFromMetadata() throws IOException { // Rotate credentials in blob store logger.info("--> rotate credentials"); - s3Fixture.setAccessKey(accessKey2); + repositoryAccessKey = accessKey2; // Ensure searchable snapshot now does not work due to invalid credentials logger.info("--> expect failure"); @@ -269,7 +276,7 @@ void ensureSearchFailure() throws IOException { assertThat( expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)).getMessage(), allOf( - containsString("Bad access key"), + containsString("Access denied"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied"), containsString("failed to read data from cache")
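The repository-s3 and searchable-snapshots suites above now rotate credentials by writing a static volatile field that the fixture reads through a supplier, instead of calling a setter such as the removed s3Fixture.setAccessKey(...). A self-contained sketch of that pattern; CredentialCheckingFixture is a hypothetical stand-in for S3HttpFixture, not its real API:

import java.util.function.Supplier;

// Hypothetical stand-in for S3HttpFixture: it captures the expected access key as a
// Supplier once, so tests can rotate credentials without reconfiguring the fixture.
final class CredentialCheckingFixture {
    private final Supplier<String> expectedAccessKey;

    CredentialCheckingFixture(Supplier<String> expectedAccessKey) {
        this.expectedAccessKey = expectedAccessKey;
    }

    boolean authorize(String presentedAccessKey) {
        // The supplier is re-read on every request, so rotations take effect immediately.
        return expectedAccessKey.get().equals(presentedAccessKey);
    }
}

class CredentialsRotationSketch {
    // Written by the test, read by the fixture's request handling.
    private static volatile String repositoryAccessKey;

    public static void main(String[] args) {
        var fixture = new CredentialCheckingFixture(() -> repositoryAccessKey);

        repositoryAccessKey = "key-1";
        System.out.println(fixture.authorize("key-1")); // true

        repositoryAccessKey = "key-2"; // rotate without touching the fixture
        System.out.println(fixture.authorize("key-1")); // false: the old key is now rejected
        System.out.println(fixture.authorize("key-2")); // true
    }
}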