Skip to content

Commit

Permalink
Merge branch 'main' into test/99778/mappings-update-race-condition
Browse files Browse the repository at this point in the history
  • Loading branch information
williamrandolph committed Dec 6, 2023
2 parents ec18558 + 6e248a8 commit 958b0ce
Show file tree
Hide file tree
Showing 211 changed files with 3,657 additions and 814 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/

package org.elasticsearch.benchmark.index.mapper;

import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.LuceneDocument;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.xcontent.XContentType;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;

import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import java.util.stream.Stream;

/**
 * Benchmarks document parsing when every field is dynamically mapped: each
 * generated document contains a random subset of text/int/float fields (flat
 * and nested under deep object paths), so parsing repeatedly triggers dynamic
 * mapping updates that are merged back into the {@link MapperService}.
 */
@Fork(value = 3)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Benchmark)
public class DynamicMapperBenchmark {

    // Fixed seed so every fork/iteration parses the same pseudo-random corpus.
    @Param({ "1600172297" })
    private long seed;

    private Random random;
    private SourceToParse[] sources;

    /** Builds the shared document corpus once per benchmark trial. */
    @Setup
    public void setUp() {
        this.random = new Random(seed);
        this.sources = generateRandomDocuments(500);
    }

    private SourceToParse[] generateRandomDocuments(int count) {
        var docs = new SourceToParse[count];
        for (int i = 0; i < count; i++) {
            docs[i] = generateRandomDocument();
        }
        return docs;
    }

    /**
     * Builds one JSON document. Each of the 50 text/int/float top-level fields
     * is present with probability 1/2; additionally, for each of 10 object
     * prefixes (kept with probability 1/2, each 10 levels deep via dotted
     * names) the same trio of field families is generated underneath it.
     * Field values are arrays of up to {@code fieldValueCountMax} elements.
     */
    private SourceToParse generateRandomDocument() {
        int textFields = 50;
        int intFields = 50;
        int floatFields = 50;
        int objFields = 10;
        int objFieldDepth = 10;
        int fieldValueCountMax = 25;
        StringBuilder builder = new StringBuilder();
        builder.append("{");
        for (int i = 0; i < textFields; i++) {
            if (random.nextBoolean()) {
                StringBuilder fieldValueBuilder = generateTextField(fieldValueCountMax);
                builder.append("\"text_field_").append(i).append("\":").append(fieldValueBuilder).append(",");
            }
        }
        for (int i = 0; i < intFields; i++) {
            if (random.nextBoolean()) {
                int fieldValueCount = random.nextInt(fieldValueCountMax);
                builder.append("\"int_field_")
                    .append(i)
                    .append("\":")
                    // Arrays.toString renders "[1, 2, 3]", which is valid JSON.
                    .append(Arrays.toString(IntStream.generate(() -> random.nextInt()).limit(fieldValueCount).toArray()))
                    .append(",");
            }
        }
        for (int i = 0; i < floatFields; i++) {
            if (random.nextBoolean()) {
                int fieldValueCount = random.nextInt(fieldValueCountMax);
                builder.append("\"float_field_")
                    .append(i)
                    .append("\":")
                    .append(Arrays.toString(DoubleStream.generate(() -> random.nextFloat()).limit(fieldValueCount).toArray()))
                    .append(",");
            }
        }
        for (int i = 0; i < objFields; i++) {
            final int idx = i; // effectively-final copy for use in the lambda below
            if (random.nextBoolean()) {
                continue;
            }
            // e.g. "obj_field_3.obj_field_3. ... .obj_field_3" (10 levels deep)
            String objFieldPrefix = Stream.generate(() -> "obj_field_" + idx).limit(objFieldDepth).collect(Collectors.joining("."));
            for (int j = 0; j < textFields; j++) {
                if (random.nextBoolean()) {
                    StringBuilder fieldValueBuilder = generateTextField(fieldValueCountMax);
                    builder.append("\"")
                        .append(objFieldPrefix)
                        .append(".text_field_")
                        .append(j)
                        .append("\":")
                        .append(fieldValueBuilder)
                        .append(",");
                }
            }
            for (int j = 0; j < intFields; j++) {
                if (random.nextBoolean()) {
                    int fieldValueCount = random.nextInt(fieldValueCountMax);
                    builder.append("\"")
                        .append(objFieldPrefix)
                        .append(".int_field_")
                        .append(j)
                        .append("\":")
                        .append(Arrays.toString(IntStream.generate(() -> random.nextInt()).limit(fieldValueCount).toArray()))
                        .append(",");
                }
            }
            for (int j = 0; j < floatFields; j++) {
                if (random.nextBoolean()) {
                    int fieldValueCount = random.nextInt(fieldValueCountMax);
                    builder.append("\"")
                        .append(objFieldPrefix)
                        .append(".float_field_")
                        .append(j)
                        .append("\":")
                        .append(Arrays.toString(DoubleStream.generate(() -> random.nextFloat()).limit(fieldValueCount).toArray()))
                        .append(",");
                }
            }
        }
        // Drop the trailing comma left by the last appended field, if any.
        if (builder.charAt(builder.length() - 1) == ',') {
            builder.deleteCharAt(builder.length() - 1);
        }
        builder.append("}");
        return new SourceToParse(UUIDs.randomBase64UUID(), new BytesArray(builder.toString()), XContentType.JSON);
    }

    /**
     * Builds a JSON array of random strings.
     *
     * <p>Note: because the final element is appended unconditionally, the
     * array always contains at least one element (1..fieldValueCountMax-1),
     * even when the drawn count is 0.
     */
    private StringBuilder generateTextField(int fieldValueCountMax) {
        int fieldValueCount = random.nextInt(fieldValueCountMax);
        StringBuilder fieldValueBuilder = new StringBuilder();
        fieldValueBuilder.append("[");
        for (int j = 0; j < fieldValueCount - 1; j++) {
            fieldValueBuilder.append("\"").append(randomString(6)).append("\"").append(",");
        }
        return fieldValueBuilder.append("\"").append(randomString(6)).append("\"").append("]");
    }

    /**
     * Returns a random string of length 0..maxLength-1 drawn from printable
     * ASCII, restricted to characters that are safe inside a JSON string
     * literal.
     *
     * <p>Fix: the previous {@code append((byte) ...)} call widened the byte
     * to {@code int} and hit the {@link StringBuilder#append(int)} overload,
     * which appends the <em>decimal text</em> of the code point (e.g. "65")
     * rather than the character itself. We now cast to {@code char}, and map
     * the two characters that would break the surrounding JSON literal
     * ({@code "} and {@code \}) to an underscore.
     */
    private String randomString(int maxLength) {
        var length = random.nextInt(maxLength);
        var builder = new StringBuilder(length);
        for (int i = 0; i < length; i++) {
            char c = (char) (32 + random.nextInt(94)); // printable ASCII, 32..125
            if (c == '"' || c == '\\') {
                c = '_'; // keep the generated JSON string literal valid
            }
            builder.append(c);
        }
        return builder.toString();
    }

    /** Returns a uniformly random element of {@code items}. */
    @SafeVarargs
    @SuppressWarnings("varargs")
    private <T> T randomFrom(T... items) {
        return items[random.nextInt(items.length)];
    }

    /**
     * Parses 25 random documents against a mapper service that starts with an
     * empty mapping, merging each document's dynamic mapping update back in —
     * so later iterations parse against a progressively richer mapping.
     *
     * @return the Lucene documents produced by one final parse
     */
    @Benchmark
    public List<LuceneDocument> benchmarkDynamicallyCreatedFields() throws Exception {
        MapperService mapperService = MapperServiceFactory.create("{}");
        for (int i = 0; i < 25; i++) {
            DocumentMapper documentMapper = mapperService.documentMapper();
            Mapping mapping = null;
            if (documentMapper == null) {
                // First iteration: bootstrap an empty mapper and remember its
                // mapping so it can be folded into the dynamic update below.
                documentMapper = DocumentMapper.createEmpty(mapperService);
                mapping = documentMapper.mapping();
            }
            ParsedDocument doc = documentMapper.parse(randomFrom(sources));
            if (mapping != null) {
                doc.addDynamicMappingsUpdate(mapping);
            }
            if (doc.dynamicMappingsUpdate() != null) {
                mapperService.merge(
                    "_doc",
                    new CompressedXContent(XContentHelper.toXContent(doc.dynamicMappingsUpdate(), XContentType.JSON, false)),
                    MapperService.MergeReason.MAPPING_UPDATE
                );
            }
        }
        return mapperService.documentMapper().parse(randomFrom(sources)).docs();
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ private List<File> resolveProjectLogs(File projectDir) {
projectDirFiles.include("**/build/testclusters/**");
projectDirFiles.include("**/build/testrun/*/temp/**");
projectDirFiles.include("**/build/**/hs_err_pid*.log");
projectDirFiles.include("**/build/**/replay_pid*.log");
projectDirFiles.exclude("**/build/testclusters/**/data/**");
projectDirFiles.exclude("**/build/testclusters/**/distro/**");
projectDirFiles.exclude("**/build/testclusters/**/repo/**");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ public void apply(Project project) {
File testOutputDir = new File(test.getReports().getJunitXml().getOutputLocation().getAsFile().get(), "output");

ErrorReportingTestListener listener = new ErrorReportingTestListener(test, testOutputDir);
test.getInputs().property(DUMP_OUTPUT_ON_FAILURE_PROP_NAME, true);
test.getExtensions().getExtraProperties().set(DUMP_OUTPUT_ON_FAILURE_PROP_NAME, true);
test.getExtensions().add("errorReportingTestListener", listener);
test.addTestOutputListener(listener);
test.addTestListener(listener);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,8 @@ public void close() throws IOException {
}

private boolean isDumpOutputEnabled() {
return (Boolean) testTask.getInputs()
return (Boolean) testTask.getExtensions()
.getExtraProperties()
.getProperties()
.getOrDefault(ElasticsearchTestBasePlugin.DUMP_OUTPUT_ON_FAILURE_PROP_NAME, true);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ public void apply(Project project) {
nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks()));

// Disable test failure reporting since this stuff is now captured in build scans
task.getInputs().property(ElasticsearchTestBasePlugin.DUMP_OUTPUT_ON_FAILURE_PROP_NAME, false);
task.getExtensions().getExtraProperties().set(ElasticsearchTestBasePlugin.DUMP_OUTPUT_ON_FAILURE_PROP_NAME, false);

// Disable the security manager and syscall filter since the test framework needs to fork processes
task.systemProperty("tests.security.manager", "false");
Expand Down
5 changes: 5 additions & 0 deletions docs/changelog/102901.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 102901
summary: Introduce local block factory
area: ES|QL
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102925.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 102925
summary: Add ldap user metadata mappings for full name and email
area: Authentication
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102937.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 102937
summary: "ESQL: New telemetry commands"
area: ES|QL
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102994.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 102994
summary: Enable Connectors API as technical preview
area: Application
type: feature
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/103013.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 103013
summary: Deprecate the unused `elasticsearch_version` field of enrich policy json
area: Ingest Node
type: enhancement
issues: []
10 changes: 9 additions & 1 deletion docs/reference/rest-api/usage.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,15 @@ GET /_xpack/usage
"grok" : 0,
"limit" : 0,
"where" : 0,
"sort" : 0
"sort" : 0,
"drop" : 0,
"show" : 0,
"rename" : 0,
"mv_expand" : 0,
"keep" : 0,
"enrich" : 0,
"from" : 0,
"row" : 0
},
"queries" : {
"rest" : {
Expand Down
10 changes: 10 additions & 0 deletions docs/reference/settings/security-settings.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -431,6 +431,16 @@ Specifies the attribute to examine on the user for group membership.
If any `group_search` settings are specified, this setting is ignored. Defaults
to `memberOf`.

`user_full_name_attribute`::
(<<static-cluster-setting,Static>>)
Specifies the attribute to examine on the user for the full name of the user.
Defaults to `cn`.

`user_email_attribute`::
(<<static-cluster-setting,Static>>)
Specifies the attribute to examine on the user for the email address of the user.
Defaults to `mail`.

`user_search.base_dn`::
(<<static-cluster-setting,Static>>)
Specifies a container DN to search for users. Required
Expand Down
4 changes: 2 additions & 2 deletions docs/reference/setup/sysconfig/tcpretries.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ Most Linux distributions default to retransmitting any lost packets 15 times.
Retransmissions back off exponentially, so these 15 retransmissions take over
900 seconds to complete. This means it takes Linux many minutes to detect a
network partition or a failed node with this method. Windows defaults to just 5
retransmissions which corresponds with a timeout of around 6 seconds.
retransmissions which corresponds with a timeout of around 13 seconds.

The Linux default allows for communication over networks that may experience
very long periods of packet loss, but this default is excessive and even harmful
Expand All @@ -32,7 +32,7 @@ therefore reduce the maximum number of TCP retransmissions.

You can decrease the maximum number of TCP retransmissions to `5` by running the
following command as `root`. Five retransmissions corresponds with a timeout of
around six seconds.
around 13 seconds.

[source,sh]
-------------------------------------
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,7 @@ The cluster state includes:
* <<indices-templates-v1,Legacy index templates>>
* <<ingest,Ingest pipelines>>
* <<index-lifecycle-management,{ilm-init} policies>>
* <<script-stored-scripts,Stored scripts>>
* For snapshots taken after 7.12.0, <<feature-state,feature states>>
// end::cluster-state-contents[]

Expand Down Expand Up @@ -260,12 +261,12 @@ You may want to restore an index in-place, for example when no alternative
options surface after the <<cluster-allocation-explain>> API reports
`no_valid_shard_copy`.

The following request <<indices-close,closes>> `index_1` and then restores it
The following request <<indices-close,closes>> `index_1` and then restores it
in-place from the `snapshot_2` snapshot in the `my_repository` repository.

[source,console]
----
POST index_1/_close
POST index_1/_close
POST /_snapshot/my_repository/snapshot_2/_restore?wait_for_completion=true
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,10 +101,10 @@ private static void rethrottleParentTask(
subRequest.setRequestsPerSecond(newRequestsPerSecond / runningSubtasks);
subRequest.setTargetParentTaskId(new TaskId(localNodeId, task.getId()));
logger.debug("rethrottling children of task [{}] to [{}] requests per second", task.getId(), subRequest.getRequestsPerSecond());
client.execute(ReindexPlugin.RETHROTTLE_ACTION, subRequest, ActionListener.wrap(r -> {
client.execute(ReindexPlugin.RETHROTTLE_ACTION, subRequest, listener.delegateFailureAndWrap((l, r) -> {
r.rethrowFailures("Rethrottle");
listener.onResponse(task.taskInfoGivenSubtaskInfo(localNodeId, r.getTasks()));
}, listener::onFailure));
l.onResponse(task.taskInfoGivenSubtaskInfo(localNodeId, r.getTasks()));
}));
} else {
logger.debug("children of task [{}] are already finished, nothing to rethrottle", task.getId());
listener.onResponse(task.taskInfo(localNodeId, true));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,11 @@
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.theInstance;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.atMost;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class TransportRethrottleActionTests extends ESTestCase {
private int slices;
Expand Down Expand Up @@ -65,6 +67,7 @@ private void rethrottleTestCase(
float newRequestsPerSecond = randomValueOtherThanMany(f -> f <= 0, () -> randomFloat());
@SuppressWarnings("unchecked")
ActionListener<TaskInfo> listener = mock(ActionListener.class);
when(listener.delegateFailureAndWrap(any())).thenCallRealMethod();

TransportRethrottleAction.rethrottle(logger, localNodeId, client, task, newRequestsPerSecond, listener);

Expand Down
Loading

0 comments on commit 958b0ce

Please sign in to comment.