From de61189a453b44025b1b0f439fc149e794dc19eb Mon Sep 17 00:00:00 2001 From: Jay Greenberg Date: Tue, 7 Nov 2017 10:19:24 -0500 Subject: [PATCH 01/19] Update discovery-ec2.asciidoc Changed recommendation from "use tribe node" to "use cross cluster search" --- docs/plugins/discovery-ec2.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 121e3adbc60f9..db0def041a8f6 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -223,7 +223,7 @@ Prefer https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as since Elas * Networking throttling takes place on smaller instance types in both the form of https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth and number of connections]. Therefore if large number of connections are needed and networking is becoming a bottleneck, avoid https://aws.amazon.com/ec2/instance-types/[instance types] with networking labeled as `Moderate` or `Low`. * Multicast is not supported, even when in an VPC; the aws cloud plugin which joins by performing a security group lookup. * When running in multiple http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zones] be sure to leverage {ref}/allocation-awareness.html[shard allocation awareness] so that not all copies of shard data reside in the same availability zone. -* Do not span a cluster across regions. If necessary, use a tribe node. +* Do not span a cluster across regions. If necessary, use cross cluster search. ===== Misc * If you have split your nodes into roles, consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the EC2 instances] by role to make it easier to filter and view your EC2 instances in the AWS console. 
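For context on the new recommendation: with cross cluster search, the local cluster only needs the remote cluster registered on it (in this release line via the `search.remote.<alias>.seeds` setting), and remote indices are addressed as `<alias>:<index>`. A minimal Java sketch follows; the alias `cluster_two`, the index pattern, and the client wiring are illustrative assumptions, not part of this patch.

    import java.io.IOException;

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    public class CrossClusterSearchExample {

        // The "<cluster alias>:<index>" syntax routes the request to the remote
        // cluster registered under that alias; everything else looks like an
        // ordinary local search.
        static SearchResponse searchRemote(RestHighLevelClient client) throws IOException {
            SearchRequest request = new SearchRequest("cluster_two:logs-*");
            request.source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
            return client.search(request);
        }
    }

Unlike a tribe node, this requires no dedicated node that joins every cluster, which is one reason the documentation now recommends cross cluster search for region-spanning searches.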
From f321c6da92576d0d377fda8fcb45595cbf835d2a Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Wed, 8 Nov 2017 00:45:47 -0800 Subject: [PATCH 02/19] scripted_metric _agg parameter disappears if params are provided (#27159) * Fixes #19768: scripted_metric _agg parameter disappears if params are provided * Test case for #19768 * Compare boolean to false instead of negating it * Added mocked script in ScriptedMetricIT * Fix test in ScriptedMetricIT for implicit _agg map --- .../ScriptedMetricAggregatorFactory.java | 2 + .../metrics/ScriptedMetricIT.java | 50 +++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index 2d9e02d08cb01..aa7de3e1ab6e1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -70,6 +70,8 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu params = deepCopyParams(params, context); } else { params = new HashMap<>(); + } + if (params.containsKey("_agg") == false) { params.put("_agg", new HashMap()); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index dab523b7c348d..24d94d5a4643c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -94,6 +94,10 @@ protected Map, Object>> pluginScripts() { scripts.put("_agg.add(1)", vars -> aggScript(vars, agg -> ((List) agg).add(1))); + scripts.put("_agg[param1] = param2", vars -> + aggScript(vars, agg -> ((Map) agg).put(XContentMapValues.extractValue("params.param1", vars), + XContentMapValues.extractValue("params.param2", vars)))); + scripts.put("vars.multiplier = 3", vars -> ((Map) vars.get("vars")).put("multiplier", 3)); @@ -356,6 +360,52 @@ public void testMapWithParams() { assertThat(totalCount, equalTo(numDocs)); } + public void testMapWithParamsAndImplicitAggMap() { + Map params = new HashMap<>(); + // don't put any _agg map in params + params.put("param1", "12"); + params.put("param2", 1); + + // The _agg hashmap will be available even if not declared in the params map + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", params); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) + .get(); + assertSearchResponse(response); + assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + 
assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + int numShardsRun = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Map.class)); + Map map = (Map) object; + for (Map.Entry entry : map.entrySet()) { + assertThat(entry, notNullValue()); + assertThat(entry.getKey(), notNullValue()); + assertThat(entry.getKey(), instanceOf(String.class)); + assertThat(entry.getValue(), notNullValue()); + assertThat(entry.getValue(), instanceOf(Number.class)); + String stringValue = (String) entry.getKey(); + assertThat(stringValue, equalTo("12")); + Number numberValue = (Number) entry.getValue(); + assertThat(numberValue, equalTo((Number) 1)); + numShardsRun++; + } + } + assertThat(numShardsRun, greaterThan(0)); + } + public void testInitMapWithParams() { Map varsMap = new HashMap<>(); varsMap.put("multiplier", 1); From 875c9f32ccf537786a9bd890f58d7bf538974aa7 Mon Sep 17 00:00:00 2001 From: olcbean Date: Wed, 8 Nov 2017 14:10:11 +0100 Subject: [PATCH 03/19] ObjectParser: Replace IllegalStateException with ParsingException (#27302) Relates to #27147 --- .../common/xcontent/ObjectParser.java | 14 +++++++------- .../common/xcontent/ObjectParserTests.java | 9 +++------ .../bucket/geogrid/GeoHashGridParserTests.java | 9 +++------ .../search/sort/ScriptSortBuilderTests.java | 3 ++- .../phrase/DirectCandidateGeneratorTests.java | 4 ++-- 5 files changed, 17 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index ed1d85b5a7644..8ba30178dc945 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -147,7 +147,7 @@ public Value parse(XContentParser parser, Value value, Context context) throws I } else { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalStateException("[" + name + "] Expected START_OBJECT but was: " + token); + throw new ParsingException(parser.getTokenLocation(), "[" + name + "] Expected START_OBJECT but was: " + token); } } @@ -159,13 +159,13 @@ public Value parse(XContentParser parser, Value value, Context context) throws I fieldParser = getParser(currentFieldName); } else { if (currentFieldName == null) { - throw new IllegalStateException("[" + name + "] no field found"); + throw new ParsingException(parser.getTokenLocation(), "[" + name + "] no field found"); } if (fieldParser == null) { assert ignoreUnknownFields : "this should only be possible if configured to ignore known fields"; parser.skipChildren(); // noop if parser points to a value, skips children if parser is start object or start array } else { - fieldParser.assertSupports(name, token, currentFieldName); + fieldParser.assertSupports(name, token, currentFieldName, parser.getTokenLocation()); parseSub(parser, fieldParser, currentFieldName, value, context); } fieldParser = null; @@ -330,7 +330,7 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur case END_OBJECT: case END_ARRAY: case FIELD_NAME: - throw new IllegalStateException("[" + name + "]" + token + " is unexpected"); + throw new ParsingException(parser.getTokenLocation(), "[" + name + "]" + token + " is unexpected"); case VALUE_STRING: case VALUE_NUMBER: case VALUE_BOOLEAN: @@ -361,12 +361,12 @@ private class FieldParser { this.type = type; } - void 
assertSupports(String parserName, XContentParser.Token token, String currentFieldName) { + void assertSupports(String parserName, XContentParser.Token token, String currentFieldName, XContentLocation location) { if (parseField.match(currentFieldName) == false) { - throw new IllegalStateException("[" + parserName + "] parsefield doesn't accept: " + currentFieldName); + throw new ParsingException(location, "[" + parserName + "] parsefield doesn't accept: " + currentFieldName); } if (supportedTokens.contains(token) == false) { - throw new IllegalArgumentException( + throw new ParsingException(location, "[" + parserName + "] " + currentFieldName + " doesn't support values of type: " + token); } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 0d879e4813116..baa2b3bcb36e6 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -34,6 +34,7 @@ import java.util.Collections; import java.util.List; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; public class ObjectParserTests extends ESTestCase { @@ -231,12 +232,8 @@ class TestStruct { TestStruct s = new TestStruct(); objectParser.declareField((i, c, x) -> c.test = i.text(), new ParseField("numeric_value"), ObjectParser.ValueType.FLOAT); - try { - objectParser.parse(parser, s, null); - fail("wrong type - must be number"); - } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "[foo] numeric_value doesn't support values of type: VALUE_BOOLEAN"); - } + Exception e = expectThrows(ParsingException.class, () -> objectParser.parse(parser, s, null)); + assertThat(e.getMessage(), containsString("[foo] numeric_value doesn't support values of type: VALUE_BOOLEAN")); } public void testParseNested() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index fa9ddd74dde6e..7f46cb9e551a8 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -93,12 +94,8 @@ public void testParseErrorOnBooleanPrecision() throws Exception { XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"my_loc\", \"precision\":false}"); XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); - try { - GeoGridAggregationBuilder.parse("geohash_grid", stParser); - fail(); - } catch (IllegalArgumentException ex) { - assertEquals("[geohash_grid] precision doesn't support values of type: VALUE_BOOLEAN", ex.getMessage()); - } + Exception e = expectThrows(ParsingException.class, () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + assertThat(e.getMessage(), containsString("[geohash_grid] precision doesn't support values of type: 
VALUE_BOOLEAN")); } public void testParseErrorOnPrecisionOutOfRange() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java index d5911a41aee56..9a28740d7271f 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; @@ -245,7 +246,7 @@ public void testParseUnexpectedToken() throws IOException { parser.nextToken(); parser.nextToken(); - Exception e = expectThrows(IllegalArgumentException.class, () -> ScriptSortBuilder.fromXContent(parser, null)); + Exception e = expectThrows(ParsingException.class, () -> ScriptSortBuilder.fromXContent(parser, null)); assertEquals("[_script] script doesn't support values of type: START_ARRAY", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 10022cc289a71..c92fba09d8cb4 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -146,7 +146,7 @@ public void testIllegalXContent() throws IOException { logger.info("Skipping test as it uses a custom duplicate check that is obsolete when strict duplicate checks are enabled."); } else { directGenerator = "{ \"field\" : \"f1\", \"field\" : \"f2\" }"; - assertIllegalXContent(directGenerator, ParsingException.class, + assertIllegalXContent(directGenerator, IllegalArgumentException.class, "[direct_generator] failed to parse field [field]"); } @@ -162,7 +162,7 @@ public void testIllegalXContent() throws IOException { // test unexpected token directGenerator = "{ \"size\" : [ \"xxl\" ] }"; - assertIllegalXContent(directGenerator, IllegalArgumentException.class, + assertIllegalXContent(directGenerator, ParsingException.class, "[direct_generator] size doesn't support values of type: START_ARRAY"); } From 2a46cf7b960994c3915d3e60d3e474ad70c3408b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 8 Nov 2017 09:14:08 -0500 Subject: [PATCH 04/19] Roll translog generation on primary promotion When a primary is promoted, rolling the translog generation here makes simpler reasoning about the relationship between primary terms and translog generation. Note that this is not strictly necessary for correctness (e.g., to avoid duplicate operations with the same sequence number within a single generation). 
Relates #27313 --- .../elasticsearch/index/shard/IndexShard.java | 6 ++- .../index/shard/IndexShardTests.java | 45 +++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 40ac12b8e41c4..fffedb09af698 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -472,8 +472,12 @@ public void updateShardState(final ShardRouting newRouting, * subsequently fails before the primary/replica re-sync completes successfully and we are now being * promoted, the local checkpoint tracker here could be left in a state where it would re-issue sequence * numbers. To ensure that this is not the case, we restore the state of the local checkpoint tracker by - * replaying the translog and marking any operations there are completed. + * replaying the translog and marking any operations there are completed. Rolling the translog generation is + * not strictly needed here (as we will never have collisions between sequence numbers in a translog + * generation in a new primary as it takes the last known sequence number as a starting point), but it + * simplifies reasoning about the relationship between primary terms and translog generations. */ + getEngine().rollTranslogGeneration(); getEngine().restoreLocalCheckpointFromTranslog(); getEngine().fillSeqNoGaps(newPrimaryTerm); getEngine().seqNoService().updateLocalCheckpointForShard(currentRouting.allocationId().getId(), diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index bf4b577ad29f0..db0421fc485e2 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -508,6 +508,51 @@ public void onFailure(Exception e) { closeShards(indexShard); } + public void testPrimaryPromotionRollsGeneration() throws Exception { + final IndexShard indexShard = newStartedShard(false); + + final long currentTranslogGeneration = indexShard.getTranslog().getGeneration().translogFileGeneration; + + // promote the replica + final ShardRouting replicaRouting = indexShard.routingEntry(); + final ShardRouting primaryRouting = + newShardRouting( + replicaRouting.shardId(), + replicaRouting.currentNodeId(), + null, + true, + ShardRoutingState.STARTED, + replicaRouting.allocationId()); + indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, + 0L, Collections.singleton(primaryRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + + /* + * This operation completing means that the delay operation executed as part of increasing the primary term has completed and the + * gaps are filled. 
+ */ + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquirePrimaryOperationPermit( + new ActionListener() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }, + ThreadPool.Names.GENERIC); + + latch.await(); + assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1)); + + closeShards(indexShard); + } + public void testOperationPermitsOnPrimaryShards() throws InterruptedException, ExecutionException, IOException { final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; From 608c9df6285f76097d883b8625301b4dcba0cdd3 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 8 Nov 2017 09:33:12 -0500 Subject: [PATCH 05/19] Correct comment in index shard test This commit fixes a comment in an index shard test which was inaccurate after it was copied from another test and not modified to reflect the reasoning in the test that it was copied into. --- .../java/org/elasticsearch/index/shard/IndexShardTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index db0421fc485e2..89e2f8441741d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -529,7 +529,7 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { /* * This operation completing means that the delay operation executed as part of increasing the primary term has completed and the - * gaps are filled. + * translog generation has rolled. */ final CountDownLatch latch = new CountDownLatch(1); indexShard.acquirePrimaryOperationPermit( From 16c49c22f9d689b3c1ad6d706a9e8c5092e1593b Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 8 Nov 2017 16:38:41 +0100 Subject: [PATCH 06/19] testCreateSplitIndexToN: do not set `routing_partition_size` to >= `number_of_routing_shards` It's an illegal value --- .../action/admin/indices/create/SplitIndexIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 8f24edf8577e4..ebd647d0e02fd 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -87,11 +87,12 @@ public void testCreateSplitIndexToN() { final boolean useRouting = randomBoolean(); final boolean useMixedRouting = useRouting ? 
randomBoolean() : false; CreateIndexRequestBuilder createInitialIndex = prepareCreate("source"); + final int routingShards = shardSplits[2] * randomIntBetween(1, 10); Settings.Builder settings = Settings.builder().put(indexSettings()) .put("number_of_shards", shardSplits[0]) - .put("index.number_of_routing_shards", shardSplits[2] * randomIntBetween(1, 10)); + .put("index.number_of_routing_shards", routingShards); if (useRouting && useMixedRouting == false && randomBoolean()) { - settings.put("index.routing_partition_size", randomIntBetween(1, 10)); + settings.put("index.routing_partition_size", randomIntBetween(1, routingShards - 1)); createInitialIndex.addMapping("t1", "_routing", "required=true"); } logger.info("use routing {} use mixed routing {}", useRouting, useMixedRouting); From 88f1a647081a26c49f873a142f5df6fb99c7ef80 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 8 Nov 2017 18:15:36 +0200 Subject: [PATCH 07/19] Add unreleased 5.6.5 version number --- core/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 9ac12abdef22f..7bac7c022f274 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -100,6 +100,8 @@ public class Version implements Comparable { public static final Version V_5_6_3 = new Version(V_5_6_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_5_6_4_ID = 5060499; public static final Version V_5_6_4 = new Version(V_5_6_4_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_5_6_5_ID = 5060599; + public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); public static final int V_6_0_0_alpha2_ID = 6000002; @@ -148,6 +150,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_5_ID: + return V_5_6_5; case V_5_6_4_ID: return V_5_6_4; case V_5_6_3_ID: From 899bc47afdcd51688b0973537227ac83111bf5c9 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 8 Nov 2017 10:43:28 -0500 Subject: [PATCH 08/19] Snapshot/Restore: better handle incorrect chunk_size settings in FS repo (#26844) Specifying a negative value or null as a chunk_size in FS repository can lead to corrupt snapshots. 
Closes #26843 --- .../BlobStoreIndexShardSnapshot.java | 2 +- .../blobstore/BlobStoreRepository.java | 4 ++ .../repositories/fs/FsRepository.java | 12 +++--- .../SharedClusterSnapshotRestoreIT.java | 37 +++++++++++-------- 4 files changed, 32 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 0abc0724c3a4e..b0767a7c512ec 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -66,7 +66,7 @@ public FileInfo(String name, StoreFileMetaData metaData, ByteSizeValue partSize) this.metadata = metaData; long partBytes = Long.MAX_VALUE; - if (partSize != null) { + if (partSize != null && partSize.getBytes() > 0) { partBytes = partSize.getBytes(); } diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index dccf12c8ed3b9..939c33d00a8d4 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -241,6 +241,10 @@ protected BlobStoreRepository(RepositoryMetaData metadata, Settings globalSettin BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, isCompress()); indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, isCompress()); + ByteSizeValue chunkSize = chunkSize(); + if (chunkSize != null && chunkSize.getBytes() <= 0) { + throw new IllegalArgumentException("the chunk size cannot be negative: [" + chunkSize + "]"); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index b490a2e784dc1..4d4ab60feef0f 100644 --- a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -54,10 +54,10 @@ public class FsRepository extends BlobStoreRepository { new Setting<>("location", "", Function.identity(), Property.NodeScope); public static final Setting REPOSITORIES_LOCATION_SETTING = new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), Property.NodeScope); - public static final Setting CHUNK_SIZE_SETTING = - Setting.byteSizeSetting("chunk_size", new ByteSizeValue(-1), Property.NodeScope); - public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = - Setting.byteSizeSetting("repositories.fs.chunk_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", + new ByteSizeValue(Long.MAX_VALUE), new ByteSizeValue(5), new ByteSizeValue(Long.MAX_VALUE), Property.NodeScope); + public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.fs.chunk_size", + new ByteSizeValue(Long.MAX_VALUE), new ByteSizeValue(5), new ByteSizeValue(Long.MAX_VALUE), Property.NodeScope); public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); public static final Setting REPOSITORIES_COMPRESS_SETTING 
= Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope); @@ -95,10 +95,8 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, blobStore = new FsBlobStore(settings, locationFile); if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - } else if (REPOSITORIES_CHUNK_SIZE_SETTING.exists(settings)) { - this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings); } else { - this.chunkSize = null; + this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings); } this.compress = COMPRESS_SETTING.exists(metadata.settings()) ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings); this.basePath = BlobPath.cleanPath(); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index a5e92d89906cc..6a6825a8adaac 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -104,6 +104,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAliasesExist; @@ -135,15 +136,29 @@ protected Collection> nodePlugins() { MockRepository.Plugin.class); } + private Settings randomRepoSettings() { + Settings.Builder repoSettings = Settings.builder(); + repoSettings.put("location", randomRepoPath()); + if (randomBoolean()) { + repoSettings.put("compress", randomBoolean()); + } + if (randomBoolean()) { + repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + } else { + if (randomBoolean()) { + repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + } else { + repoSettings.put("chunk_size", (String) null); + } + } + return repoSettings.build(); + } + public void testBasicWorkFlow() throws Exception { Client client = client(); logger.info("--> creating repository"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(randomRepoSettings())); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -308,11 +323,7 @@ public void testFreshIndexUUID() { Client client = client(); logger.info("--> creating repository"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(randomRepoSettings())); createIndex("test"); String originalIndexUUID = 
client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID); @@ -356,11 +367,7 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { Client client = client(); logger.info("--> creating repository"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(randomRepoSettings())); logger.info("--> create index with foo type"); assertAcked(prepareCreate("test-idx", 2, Settings.builder() From 4f20698b7251d0990ac2eedbdcb9dd8b82bd6a8f Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 3 Nov 2017 19:36:08 -0400 Subject: [PATCH 09/19] Fix snapshot getting stuck in INIT state (#27214) If the master disconnects from the cluster after initiating a snapshot, but just before the snapshot switches from INIT to STARTED state, the snapshot can get indefinitely stuck in the INIT state. This error is specific to v5.x+ and was triggered by keeping the master node that stepped down in the node list; the cleanup logic in snapshot/restore assumed that if the master steps down it is always removed from the node list. This commit changes the logic to trigger cleanup even if no nodes left the cluster. Closes #27180 --- .../common/blobstore/fs/FsBlobContainer.java | 4 +- .../snapshots/SnapshotsService.java | 38 +++- .../discovery/SnapshotDisruptionIT.java | 173 ++++++++++++++++++ 3 files changed, 209 insertions(+), 6 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 757cce7d8379a..1e384109aebce 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -140,7 +140,9 @@ public void move(String source, String target) throws IOException { Path targetPath = path.resolve(target); // If the target file exists then Files.move() behaviour is implementation specific // the existing file might be replaced or this method fails by throwing an IOException. 
- assert !Files.exists(targetPath); + if (Files.exists(targetPath)) { + throw new FileAlreadyExistsException("blob [" + targetPath + "] already exists, cannot overwrite"); + } Files.move(sourcePath, targetPath, StandardCopyOption.ATOMIC_MOVE); IOUtils.fsync(path, true); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 037db4d5caf66..0804e69e46e23 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -425,6 +425,15 @@ public void onFailure(String source, Exception e) { removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); } + @Override + public void onNoLongerMaster(String source) { + // We are not longer a master - we shouldn't try to do any cleanup + // The new master will take care of it + logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId()); + userCreateSnapshotListener.onFailure( + new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization")); + } + @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted @@ -473,6 +482,10 @@ public void onFailure(Exception e) { cleanupAfterError(e); } + public void onNoLongerMaster(String source) { + userCreateSnapshotListener.onFailure(e); + } + private void cleanupAfterError(Exception exception) { if(snapshotCreated) { try { @@ -628,7 +641,8 @@ private SnapshotShardFailure findShardFailure(List shardFa public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { - if (event.nodesRemoved()) { + // We don't remove old master when master flips anymore. 
So, we need to check for change in master + if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) { processSnapshotsOnRemovedNodes(event); } if (event.routingTableChanged()) { @@ -981,7 +995,7 @@ private void removeSnapshotFromClusterState(final Snapshot snapshot, final Snaps * @param listener listener to notify when snapshot information is removed from the cluster state */ private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure, - @Nullable ActionListener listener) { + @Nullable CleanupAfterErrorListener listener) { clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() { @Override @@ -1013,6 +1027,13 @@ public void onFailure(String source, Exception e) { } } + @Override + public void onNoLongerMaster(String source) { + if (listener != null) { + listener.onNoLongerMaster(source); + } + } + @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { for (SnapshotCompletionListener listener : snapshotCompletionListeners) { @@ -1183,9 +1204,16 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh if (completedSnapshot.equals(snapshot)) { logger.debug("deleted snapshot completed - deleting files"); removeListener(this); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> - deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), - listener, true) + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), + listener, true); + + } catch (Exception ex) { + logger.warn((Supplier) () -> + new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); + } + } ); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java new file mode 100644 index 0000000000000..3458cca0cf78e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.instanceOf; + +/** + * Tests snapshot operations during disruptions. + */ +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) +@TestLogging("org.elasticsearch.snapshot:TRACE") +public class SnapshotDisruptionIT extends AbstractDisruptionTestCase { + + public void testDisruptionOnSnapshotInitialization() throws Exception { + final Settings settings = Settings.builder() + .put(DEFAULT_SETTINGS) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed + .build(); + final String idxName = "test"; + configureCluster(settings, 4, null, 2); + final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(4); + + createRandomIndex(idxName); + + logger.info("--> creating repository"); + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + // Writing incompatible snapshot can cause this test to fail due to a race condition in repo initialization + // by the current master and the former master. It is not causing any issues in real life scenario, but + // might make this test to fail. We are going to complete initialization of the snapshot to prevent this failures. 
+ logger.info("--> initializing the repository"); + assertEquals(SnapshotState.SUCCESS, client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true).setIncludeGlobalState(true).setIndices().get().getSnapshotInfo().state()); + + final String masterNode1 = internalCluster().getMasterName(); + Set otherNodes = new HashSet<>(); + otherNodes.addAll(allMasterEligibleNodes); + otherNodes.remove(masterNode1); + otherNodes.add(dataNode); + + NetworkDisruption networkDisruption = + new NetworkDisruption(new NetworkDisruption.TwoPartitions(Collections.singleton(masterNode1), otherNodes), + new NetworkDisruption.NetworkUnresponsive()); + internalCluster().setDisruptionScheme(networkDisruption); + + ClusterService clusterService = internalCluster().clusterService(masterNode1); + CountDownLatch disruptionStarted = new CountDownLatch(1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + SnapshotsInProgress snapshots = event.state().custom(SnapshotsInProgress.TYPE); + if (snapshots != null && snapshots.entries().size() > 0) { + if (snapshots.entries().get(0).state() == SnapshotsInProgress.State.INIT) { + // The snapshot started, we can start disruption so the INIT state will arrive to another master node + logger.info("--> starting disruption"); + networkDisruption.startDisrupting(); + clusterService.removeListener(this); + disruptionStarted.countDown(); + } + } + } + }); + + logger.info("--> starting snapshot"); + ActionFuture future = client(masterNode1).admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(false).setIndices(idxName).execute(); + + logger.info("--> waiting for disruption to start"); + assertTrue(disruptionStarted.await(1, TimeUnit.MINUTES)); + + logger.info("--> wait until the snapshot is done"); + assertBusy(() -> { + SnapshotsInProgress snapshots = dataNodeClient().admin().cluster().prepareState().setLocal(true).get().getState() + .custom(SnapshotsInProgress.TYPE); + if (snapshots != null && snapshots.entries().size() > 0) { + logger.info("Current snapshot state [{}]", snapshots.entries().get(0).state()); + fail("Snapshot is still running"); + } else { + logger.info("Snapshot is no longer in the cluster state"); + } + }, 1, TimeUnit.MINUTES); + + logger.info("--> verify that snapshot was successful or no longer exist"); + assertBusy(() -> { + try { + GetSnapshotsResponse snapshotsStatusResponse = dataNodeClient().admin().cluster().prepareGetSnapshots("test-repo") + .setSnapshots("test-snap-2").get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + logger.info("--> done verifying"); + } catch (SnapshotMissingException exception) { + logger.info("--> snapshot doesn't exist"); + } + }, 1, TimeUnit.MINUTES); + + logger.info("--> stopping disrupting"); + networkDisruption.stopDisrupting(); + ensureStableCluster(4, masterNode1); + logger.info("--> done"); + + try { + future.get(); + } catch (Exception ex) { + logger.info("--> got exception from hanged master", ex); + Throwable cause = ex.getCause(); + assertThat(cause, instanceOf(MasterNotDiscoveredException.class)); + cause = cause.getCause(); + assertThat(cause, instanceOf(Discovery.FailedToCommitClusterStateException.class)); + } + } + + private void 
createRandomIndex(String idxName) throws ExecutionException, InterruptedException { + assertAcked(prepareCreate(idxName, 0, Settings.builder().put("number_of_shards", between(1, 20)) + .put("number_of_replicas", 0))); + logger.info("--> indexing some data"); + final int numdocs = randomIntBetween(10, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(idxName, "type1", Integer.toString(i)).setSource("field1", "bar " + i); + } + indexRandom(true, builders); + } +} From bdd00637076b0f372c01aa6066d69152fbba9b91 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 8 Nov 2017 15:22:31 -0500 Subject: [PATCH 10/19] Remove colons from task and configuration names Gradle 5.0 will remove support for colons in configuration and task names. This commit fixes this for our build by removing all current uses of colons in configuration and task names. Relates #27305 --- .../org/elasticsearch/gradle/BuildPlugin.groovy | 2 +- .../gradle/test/ClusterFormationTasks.groovy | 16 ++++++++++++---- plugins/repository-hdfs/build.gradle | 2 +- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 159f84258df28..add518822e07a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -239,7 +239,7 @@ class BuildPlugin implements Plugin { /** Return the configuration name used for finding transitive deps of the given dependency. */ private static String transitiveDepConfigName(String groupId, String artifactId, String version) { - return "_transitive_${groupId}:${artifactId}:${version}" + return "_transitive_${groupId}_${artifactId}_${version}" } /** diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 894fce0fadcc1..d9f99b4c28552 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -429,7 +429,7 @@ class ClusterFormationTasks { Project pluginProject = plugin.getValue() verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - String configurationName = "_plugin_${prefix}_${pluginProject.path}" + String configurationName = pluginConfigurationName(prefix, pluginProject) Configuration configuration = project.configurations.findByName(configurationName) if (configuration == null) { configuration = project.configurations.create(configurationName) @@ -458,13 +458,21 @@ class ClusterFormationTasks { return copyPlugins } + private static String pluginConfigurationName(final String prefix, final Project project) { + return "_plugin_${prefix}_${project.path}".replace(':', '_') + } + + private static String pluginBwcConfigurationName(final String prefix, final Project project) { + return "_plugin_bwc_${prefix}_${project.path}".replace(':', '_') + } + /** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */ static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) { Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins") for (Map.Entry 
plugin : node.config.plugins.entrySet()) { Project pluginProject = plugin.getValue() verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - String configurationName = "_plugin_bwc_${prefix}_${pluginProject.path}" + String configurationName = pluginBwcConfigurationName(prefix, pluginProject) Configuration configuration = project.configurations.findByName(configurationName) if (configuration == null) { configuration = project.configurations.create(configurationName) @@ -503,9 +511,9 @@ class ClusterFormationTasks { static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin, String prefix) { final FileCollection pluginZip; if (node.nodeVersion != VersionProperties.elasticsearch) { - pluginZip = project.configurations.getByName("_plugin_bwc_${prefix}_${plugin.path}") + pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, plugin)) } else { - pluginZip = project.configurations.getByName("_plugin_${prefix}_${plugin.path}") + pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, plugin)) } // delay reading the file location until execution time by wrapping in a closure within a GString final Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}" diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 2f45cb5ab8f21..6426f588352b7 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -106,7 +106,7 @@ List principals = [ "elasticsearch", "hdfs/hdfs.build.elastic.co" ] String realm = "BUILD.ELASTIC.CO" for (String principal : principals) { - Task create = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + Task create = project.tasks.create("addPrincipal#${principal}".replace('/', '_'), org.elasticsearch.gradle.vagrant.VagrantCommandTask) { command 'ssh' args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" boxName box From ac0df4232c5af99d5c3e3cb3354ee51e6643c6b2 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 8 Nov 2017 15:27:15 -0500 Subject: [PATCH 11/19] Fix find remote when building BWC We look for the remote by scanning the output of "git remote -v" but we were not actually looking at the output since standard output was not redirected anywhere. This commit fixes this issue. Relates #27308 --- distribution/bwc/build.gradle | 2 ++ 1 file changed, 2 insertions(+) diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 0cd812bbf295b..1e8d45a232fee 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -77,6 +77,8 @@ if (enabled) { dependsOn createClone workingDir = checkoutDir commandLine = ['git', 'remote', '-v'] + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output doLast { project.ext.remoteExists = false output.toString('UTF-8').eachLine { From ca6349e764faad4bf948ecc80f287a5a85da46d1 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Fri, 18 Aug 2017 13:38:38 -0500 Subject: [PATCH 12/19] Use PlainListenableActionFuture for CloseFuture (#26242) Right now we use a custom future for the CloseFuture associated with a channel. This is because we need special unwrapping logic to ensure that exceptions from a future failure are of a certain type (as opposed to an UncategorizedException). However, the current version is limiting because we can only attach one listener. 
This commit changes the CloseFuture to extend the PlainListenableActionFuture. This change allows us to attach multiple listeners. --- .../elasticsearch/action/ActionListener.java | 2 +- .../support/PlainListenableActionFuture.java | 2 +- .../transport/nio/NioClient.java | 4 -- .../transport/nio/NioTransport.java | 6 +-- .../nio/channel/AbstractNioChannel.java | 2 +- .../transport/nio/channel/ChannelFactory.java | 5 ++- .../transport/nio/channel/CloseFuture.java | 42 ++++++------------- .../channel/NioServerSocketChannelTests.java | 7 +++- .../nio/channel/NioSocketChannelTests.java | 7 +++- 9 files changed, 30 insertions(+), 47 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/ActionListener.java b/core/src/main/java/org/elasticsearch/action/ActionListener.java index f9fafa9f95a2e..fa32ab417737c 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/core/src/main/java/org/elasticsearch/action/ActionListener.java @@ -45,7 +45,7 @@ public interface ActionListener { * Creates a listener that listens for a response (or failure) and executes the * corresponding consumer when the response (or failure) is received. * - * @param onResponse the consumer of the response, when the listener receives one + * @param onResponse the checked consumer of the response, when the listener receives one * @param onFailure the consumer of the failure, when the listener receives one * @param the type of the response * @return a listener that listens for responses and invokes the consumer when received diff --git a/core/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java b/core/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java index 749bf1fea019d..943c36797096c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java @@ -33,7 +33,7 @@ public class PlainListenableActionFuture extends AdapterActionFuture im volatile Object listeners; boolean executedListeners = false; - private PlainListenableActionFuture() {} + protected PlainListenableActionFuture() {} /** * This method returns a listenable future. The listeners will be called on completion of the future. 
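The constructor visibility change above is what allows the CloseFuture (further below) to extend PlainListenableActionFuture. A minimal sketch of the capability this buys: any number of listeners can be attached, where the old SetOnce-based listener field accepted exactly one. The listener bodies here are placeholders, not code from the patch.

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.PlainListenableActionFuture;

    public class MultipleListenersExample {

        // Both listeners fire when the future completes; with the previous
        // SetOnce-based implementation the second registration would have failed.
        static void attachListeners(PlainListenableActionFuture<String> closeFuture) {
            closeFuture.addListener(ActionListener.wrap(
                channelName -> System.out.println("closed " + channelName),
                e -> System.out.println("close failed: " + e)));
            closeFuture.addListener(ActionListener.wrap(
                channelName -> System.out.println("also notified: " + channelName),
                e -> System.out.println("also notified of failure: " + e)));
        }
    }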
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java index f877b7e9153a6..ee0b32db0149a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java @@ -138,10 +138,6 @@ private void closeChannels(ArrayList connections, Exception e) for (final NioSocketChannel socketChannel : connections) { try { socketChannel.closeAsync().awaitClose(); - } catch (InterruptedException inner) { - logger.trace("exception while closing channel", e); - e.addSuppressed(inner); - Thread.currentThread().interrupt(); } catch (Exception inner) { logger.trace("exception while closing channel", e); e.addSuppressed(inner); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 0c00f34c69a17..9eabcc56f28cd 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -131,11 +131,7 @@ protected void closeChannels(List channels, boolean blocking, boolea for (CloseFuture future : futures) { try { future.awaitClose(); - IOException closeException = future.getCloseException(); - if (closeException != null) { - closingExceptions = addClosingException(closingExceptions, closeException); - } - } catch (InterruptedException e) { + } catch (Exception e) { closingExceptions = addClosingException(closingExceptions, e); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java index ee3a9d0381f00..c550785fac517 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java @@ -113,7 +113,7 @@ public void closeFromSelector() { closeRawChannel(); closedOnThisCall = closeFuture.channelClosed(this); } catch (IOException e) { - closedOnThisCall = closeFuture.channelCloseThrewException(this, e); + closedOnThisCall = closeFuture.channelCloseThrewException(e); } finally { if (closedOnThisCall) { selector.removeRegisteredChannel(this); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java index c25936ce7fc01..f2f92e94e509d 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java @@ -21,6 +21,7 @@ import org.apache.lucene.util.IOUtils; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.mocksocket.PrivilegedSocketAccess; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.nio.AcceptingSelector; @@ -55,7 +56,7 @@ public NioSocketChannel openNioChannel(InetSocketAddress remoteAddress, SocketSe SocketChannel rawChannel = rawChannelFactory.openNioChannel(remoteAddress); NioSocketChannel channel = new NioSocketChannel(NioChannel.CLIENT, rawChannel, selector); channel.setContexts(new TcpReadContext(channel, handler), new TcpWriteContext(channel)); - 
channel.getCloseFuture().setListener(closeListener); + channel.getCloseFuture().addListener(ActionListener.wrap(closeListener::accept, (e) -> closeListener.accept(channel))); scheduleChannel(channel, selector); return channel; } @@ -65,7 +66,7 @@ public NioSocketChannel acceptNioChannel(NioServerSocketChannel serverChannel, S SocketChannel rawChannel = rawChannelFactory.acceptNioChannel(serverChannel); NioSocketChannel channel = new NioSocketChannel(serverChannel.getProfile(), rawChannel, selector); channel.setContexts(new TcpReadContext(channel, handler), new TcpWriteContext(channel)); - channel.getCloseFuture().setListener(closeListener); + channel.getCloseFuture().addListener(ActionListener.wrap(closeListener::accept, (e) -> closeListener.accept(channel))); scheduleChannel(channel, selector); return channel; } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java index c27ba306e0e60..5932de8fef708 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java @@ -19,35 +19,37 @@ package org.elasticsearch.transport.nio.channel; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.common.util.concurrent.BaseFuture; +import org.elasticsearch.action.support.PlainListenableActionFuture; import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.function.Consumer; -public class CloseFuture extends BaseFuture { - - private final SetOnce> listener = new SetOnce<>(); +public class CloseFuture extends PlainListenableActionFuture { @Override public boolean cancel(boolean mayInterruptIfRunning) { throw new UnsupportedOperationException("Cannot cancel close future"); } - public void awaitClose() throws InterruptedException, IOException { + public void awaitClose() throws IOException { try { super.get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { throw (IOException) e.getCause(); } } - public void awaitClose(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException, IOException { + public void awaitClose(long timeout, TimeUnit unit) throws TimeoutException, IOException { try { super.get(timeout, unit); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { throw (IOException) e.getCause(); } @@ -76,31 +78,13 @@ public boolean isClosed() { return super.isDone(); } - public void setListener(Consumer listener) { - this.listener.set(listener); - } - boolean channelClosed(NioChannel channel) { - boolean set = set(channel); - if (set) { - Consumer listener = this.listener.get(); - if (listener != null) { - listener.accept(channel); - } - } - return set; + return set(channel); } - boolean channelCloseThrewException(NioChannel channel, IOException ex) { - boolean set = setException(ex); - if (set) { - Consumer listener = this.listener.get(); - if (listener != null) { - listener.accept(channel); - } - } - return set; + boolean channelCloseThrewException(IOException ex) { + return setException(ex); } } diff --git 
a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java index 6f05d3c1f34c6..367df0c78f4c8 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.nio.channel; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.nio.AcceptingSelector; import org.elasticsearch.transport.nio.AcceptorEventHandler; @@ -33,6 +34,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import java.util.function.Supplier; import static org.mockito.Mockito.mock; @@ -64,10 +66,11 @@ public void testClose() throws IOException, TimeoutException, InterruptedExcepti CountDownLatch latch = new CountDownLatch(1); NioChannel channel = new DoNotCloseServerChannel("nio", mock(ServerSocketChannel.class), mock(ChannelFactory.class), selector); - channel.getCloseFuture().setListener((c) -> { + Consumer listener = (c) -> { ref.set(c); latch.countDown(); - }); + }; + channel.getCloseFuture().addListener(ActionListener.wrap(listener::accept, (e) -> listener.accept(channel))); CloseFuture closeFuture = channel.getCloseFuture(); diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java index 3d039b41a8a68..75ec57b2603db 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.nio.channel; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.nio.SocketEventHandler; import org.elasticsearch.transport.nio.SocketSelector; @@ -34,6 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Consumer; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; @@ -67,10 +69,11 @@ public void testClose() throws IOException, TimeoutException, InterruptedExcepti NioSocketChannel socketChannel = new DoNotCloseChannel(NioChannel.CLIENT, mock(SocketChannel.class), selector); socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class)); - socketChannel.getCloseFuture().setListener((c) -> { + Consumer listener = (c) -> { ref.set(c); latch.countDown(); - }); + }; + socketChannel.getCloseFuture().addListener(ActionListener.wrap(listener::accept, (e) -> listener.accept(socketChannel))); CloseFuture closeFuture = socketChannel.getCloseFuture(); assertFalse(closeFuture.isClosed()); From f35b6b23e1689a1620d25fce617952e5b7ce3f2f Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 8 Nov 2017 14:30:00 -0700 Subject: [PATCH 13/19] Decouple `ChannelFactory` from Tcp classes (#27286) * Decouple `ChannelFactory` from Tcp classes This is related to #27260. 
Currently `ChannelFactory` is tightly coupled to classes related to the elasticsearch Tcp binary protocol. This commit modifies the factory to be able to construct http or other protocol channels. --- .../transport/nio/NioTransport.java | 9 ++-- .../transport/nio/channel/ChannelFactory.java | 47 +++++++++++++------ .../nio/channel/ChannelFactoryTests.java | 12 ++++- 3 files changed, 50 insertions(+), 18 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 9eabcc56f28cd..686432722a204 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -41,6 +41,8 @@ import org.elasticsearch.transport.nio.channel.NioChannel; import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.elasticsearch.transport.nio.channel.TcpReadContext; +import org.elasticsearch.transport.nio.channel.TcpWriteContext; import java.io.IOException; import java.net.InetSocketAddress; @@ -68,7 +70,7 @@ public class NioTransport extends TcpTransport { public static final Setting NIO_ACCEPTOR_COUNT = intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); - private final TcpReadHandler tcpReadHandler = new TcpReadHandler(this); + private final Consumer contextSetter; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); private final OpenChannels openChannels = new OpenChannels(logger); private final ArrayList acceptors = new ArrayList<>(); @@ -79,6 +81,7 @@ public class NioTransport extends TcpTransport { public NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); + contextSetter = (c) -> c.setContexts(new TcpReadContext(c, new TcpReadHandler(this)), new TcpWriteContext(c)); } @Override @@ -206,7 +209,7 @@ protected void doStart() { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { - profileToChannelFactory.putIfAbsent(profileSettings.profileName, new ChannelFactory(profileSettings, tcpReadHandler)); + profileToChannelFactory.putIfAbsent(profileSettings.profileName, new ChannelFactory(profileSettings, contextSetter)); bindServer(profileSettings); } } @@ -243,7 +246,7 @@ final void exceptionCaught(NioSocketChannel channel, Throwable cause) { private NioClient createClient() { Supplier selectorSupplier = new RoundRobinSelectorSupplier(socketSelectors); - ChannelFactory channelFactory = new ChannelFactory(new ProfileSettings(settings, "default"), tcpReadHandler); + ChannelFactory channelFactory = new ChannelFactory(new ProfileSettings(settings, "default"), contextSetter); return new NioClient(logger, openChannels, selectorSupplier, defaultConnectionProfile.getConnectTimeout(), channelFactory); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java index f2f92e94e509d..199bab9a904b0 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.nio.AcceptingSelector; import org.elasticsearch.transport.nio.SocketSelector; -import org.elasticsearch.transport.nio.TcpReadHandler; import java.io.Closeable; import java.io.IOException; @@ -39,15 +38,28 @@ public class ChannelFactory { - private final TcpReadHandler handler; + private final Consumer contextSetter; private final RawChannelFactory rawChannelFactory; - public ChannelFactory(TcpTransport.ProfileSettings profileSettings, TcpReadHandler handler) { - this(new RawChannelFactory(profileSettings), handler); + /** + * This will create a {@link ChannelFactory} using the profile settings and context setter passed to this + * constructor. The context setter must be a {@link Consumer} that calls + * {@link NioSocketChannel#setContexts(ReadContext, WriteContext)} with the appropriate read and write + * contexts. The read and write contexts handle the protocol specific encoding and decoding of messages. + * + * @param profileSettings the profile settings channels opened by this factory + * @param contextSetter a consumer that takes a channel and sets the read and write contexts + */ + public ChannelFactory(TcpTransport.ProfileSettings profileSettings, Consumer contextSetter) { + this(new RawChannelFactory(profileSettings.tcpNoDelay, + profileSettings.tcpKeepAlive, + profileSettings.reuseAddress, + Math.toIntExact(profileSettings.sendBufferSize.getBytes()), + Math.toIntExact(profileSettings.receiveBufferSize.getBytes())), contextSetter); } - ChannelFactory(RawChannelFactory rawChannelFactory, TcpReadHandler handler) { - this.handler = handler; + ChannelFactory(RawChannelFactory rawChannelFactory, Consumer contextSetter) { + this.contextSetter = contextSetter; this.rawChannelFactory = rawChannelFactory; } @@ -55,7 +67,7 @@ public NioSocketChannel openNioChannel(InetSocketAddress remoteAddress, SocketSe Consumer closeListener) throws IOException { SocketChannel rawChannel = rawChannelFactory.openNioChannel(remoteAddress); NioSocketChannel channel = new NioSocketChannel(NioChannel.CLIENT, rawChannel, selector); - channel.setContexts(new TcpReadContext(channel, handler), new TcpWriteContext(channel)); + setContexts(channel); channel.getCloseFuture().addListener(ActionListener.wrap(closeListener::accept, (e) -> closeListener.accept(channel))); scheduleChannel(channel, selector); return channel; @@ -65,7 +77,7 @@ public NioSocketChannel acceptNioChannel(NioServerSocketChannel serverChannel, S Consumer closeListener) throws IOException { SocketChannel rawChannel = rawChannelFactory.acceptNioChannel(serverChannel); NioSocketChannel channel = new NioSocketChannel(serverChannel.getProfile(), rawChannel, selector); - channel.setContexts(new TcpReadContext(channel, handler), new TcpWriteContext(channel)); + setContexts(channel); channel.getCloseFuture().addListener(ActionListener.wrap(closeListener::accept, (e) -> closeListener.accept(channel))); scheduleChannel(channel, selector); return channel; @@ -97,6 +109,12 @@ private void scheduleServerChannel(NioServerSocketChannel channel, AcceptingSele } } + private void setContexts(NioSocketChannel channel) { + contextSetter.accept(channel); + assert channel.getReadContext() != null : "read context should have been set on channel"; + assert channel.getWriteContext() != 
null : "write context should have been set on channel"; + } + static class RawChannelFactory { private final boolean tcpNoDelay; @@ -105,12 +123,13 @@ static class RawChannelFactory { private final int tcpSendBufferSize; private final int tcpReceiveBufferSize; - RawChannelFactory(TcpTransport.ProfileSettings profileSettings) { - tcpNoDelay = profileSettings.tcpNoDelay; - tcpKeepAlive = profileSettings.tcpKeepAlive; - tcpReusedAddress = profileSettings.reuseAddress; - tcpSendBufferSize = Math.toIntExact(profileSettings.sendBufferSize.getBytes()); - tcpReceiveBufferSize = Math.toIntExact(profileSettings.receiveBufferSize.getBytes()); + RawChannelFactory(boolean tcpNoDelay, boolean tcpKeepAlive, boolean tcpReusedAddress, int tcpSendBufferSize, + int tcpReceiveBufferSize) { + this.tcpNoDelay = tcpNoDelay; + this.tcpKeepAlive = tcpKeepAlive; + this.tcpReusedAddress = tcpReusedAddress; + this.tcpSendBufferSize = tcpSendBufferSize; + this.tcpReceiveBufferSize = tcpReceiveBufferSize; } SocketChannel openNioChannel(InetSocketAddress remoteAddress) throws IOException { diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/ChannelFactoryTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/ChannelFactoryTests.java index 8851c37f2012e..710f26bedcf39 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/ChannelFactoryTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/ChannelFactoryTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.transport.nio.TcpReadHandler; import org.junit.After; import org.junit.Before; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import java.io.IOException; import java.net.InetAddress; @@ -36,6 +38,7 @@ import static org.mockito.Matchers.any; import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -55,12 +58,19 @@ public class ChannelFactoryTests extends ESTestCase { @SuppressWarnings("unchecked") public void setupFactory() throws IOException { rawChannelFactory = mock(ChannelFactory.RawChannelFactory.class); - channelFactory = new ChannelFactory(rawChannelFactory, mock(TcpReadHandler.class)); + Consumer contextSetter = mock(Consumer.class); + channelFactory = new ChannelFactory(rawChannelFactory, contextSetter); listener = mock(Consumer.class); socketSelector = mock(SocketSelector.class); acceptingSelector = mock(AcceptingSelector.class); rawChannel = SocketChannel.open(); rawServerChannel = ServerSocketChannel.open(); + + doAnswer(invocationOnMock -> { + NioSocketChannel channel = (NioSocketChannel) invocationOnMock.getArguments()[0]; + channel.setContexts(mock(ReadContext.class), mock(WriteContext.class)); + return null; + }).when(contextSetter).accept(any()); } @After From 11dc3a342413eafa68ec053a79119167383c9234 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 9 Nov 2017 08:09:14 +0000 Subject: [PATCH 14/19] Remove optimisations to reuse objects when applying a new `ClusterState` (#27317) In order to avoid churn when applying a new `ClusterState`, there are some checks that compare parts of the old and new states and, if equal, the new object is discarded and the old one reused. Since `ClusterState` updates are now largely diff-based, this code is unnecessary: applying a diff also reuses any old objects if unchanged. 
Moreover, the code compares the parts of the `ClusterState` using their `version()` values which is not guaranteed to be correct, because of a lack of consensus. This change removes this optimisation, and tests that objects are still reused as expected via the diff mechanism. --- .../discovery/zen/ZenDiscovery.java | 44 ++------------ .../ClusterSerializationTests.java | 57 +++++++++++++++++++ 2 files changed, 63 insertions(+), 38 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 3082533363d0c..e079e827cd1f0 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -735,7 +735,6 @@ boolean processNextCommittedClusterState(String reason) { final ClusterState newClusterState = pendingStatesQueue.getNextClusterStateToProcess(); final ClusterState currentState = committedState.get(); - final ClusterState adaptedNewClusterState; // all pending states have been processed if (newClusterState == null) { return false; @@ -773,54 +772,23 @@ boolean processNextCommittedClusterState(String reason) { if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) { // its a fresh update from the master as we transition from a start of not having a master to having one logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId()); - adaptedNewClusterState = newClusterState; - } else if (newClusterState.nodes().isLocalNodeElectedMaster() == false) { - // some optimizations to make sure we keep old objects where possible - ClusterState.Builder builder = ClusterState.builder(newClusterState); - - // if the routing table did not change, use the original one - if (newClusterState.routingTable().version() == currentState.routingTable().version()) { - builder.routingTable(currentState.routingTable()); - } - // same for metadata - if (newClusterState.metaData().version() == currentState.metaData().version()) { - builder.metaData(currentState.metaData()); - } else { - // if its not the same version, only copy over new indices or ones that changed the version - MetaData.Builder metaDataBuilder = MetaData.builder(newClusterState.metaData()).removeAllIndices(); - for (IndexMetaData indexMetaData : newClusterState.metaData()) { - IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.getIndex()); - if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.getIndexUUID()) && - currentIndexMetaData.getVersion() == indexMetaData.getVersion()) { - // safe to reuse - metaDataBuilder.put(currentIndexMetaData, false); - } else { - metaDataBuilder.put(indexMetaData, false); - } - } - builder.metaData(metaDataBuilder); - } - - adaptedNewClusterState = builder.build(); - } else { - adaptedNewClusterState = newClusterState; } - if (currentState == adaptedNewClusterState) { + if (currentState == newClusterState) { return false; } - committedState.set(adaptedNewClusterState); + committedState.set(newClusterState); // update failure detection only after the state has been updated to prevent race condition with handleLeaveRequest // and handleNodeFailure as those check the current state to determine whether the failure is to be handled by this node - if (adaptedNewClusterState.nodes().isLocalNodeElectedMaster()) { + if (newClusterState.nodes().isLocalNodeElectedMaster()) { // update the set of nodes to ping - 
nodesFD.updateNodesAndPing(adaptedNewClusterState); + nodesFD.updateNodesAndPing(newClusterState); } else { // check to see that we monitor the correct master of the cluster - if (masterFD.masterNode() == null || !masterFD.masterNode().equals(adaptedNewClusterState.nodes().getMasterNode())) { - masterFD.restart(adaptedNewClusterState.nodes().getMasterNode(), + if (masterFD.masterNode() == null || !masterFD.masterNode().equals(newClusterState.nodes().getMasterNode())) { + masterFD.restart(newClusterState.nodes().getMasterNode(), "new cluster state received and we are monitoring the wrong master [" + masterFD.masterNode() + "]"); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 72612a238574a..de221a70b92d9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; @@ -42,6 +43,8 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; +import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; @@ -161,4 +164,58 @@ public void testSnapshotDeletionsInProgressSerialization() throws Exception { assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue()); } + private ClusterState updateUsingSerialisedDiff(ClusterState original, Diff diff) throws IOException { + BytesStreamOutput outStream = new BytesStreamOutput(); + outStream.setVersion(Version.CURRENT); + diff.writeTo(outStream); + StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(), + new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + diff = ClusterState.readDiffFrom(inStream, newNode("node-name")); + return diff.apply(original); + } + + public void testObjectReuseWhenApplyingClusterStateDiff() throws Exception { + IndexMetaData indexMetaData + = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1).build(); + IndexTemplateMetaData indexTemplateMetaData + = IndexTemplateMetaData.builder("test-template").patterns(new ArrayList<>()).build(); + MetaData metaData = MetaData.builder().put(indexMetaData, true).put(indexTemplateMetaData).build(); + + RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build(); + + ClusterState clusterState1 = ClusterState.builder(new ClusterName("clusterName1")) + .metaData(metaData).routingTable(routingTable).build(); + BytesStreamOutput outStream = new BytesStreamOutput(); + outStream.setVersion(Version.CURRENT); + clusterState1.writeTo(outStream); + StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(), + new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + ClusterState serializedClusterState1 = ClusterState.readFrom(inStream, newNode("node4")); + + // Create a new, albeit 
equal, IndexMetadata object + ClusterState clusterState2 = ClusterState.builder(clusterState1).incrementVersion() + .metaData(MetaData.builder().put(IndexMetaData.builder(indexMetaData).numberOfReplicas(1).build(), true)).build(); + assertNotSame("Should have created a new, equivalent, IndexMetaData object in clusterState2", + clusterState1.metaData().index("test"), clusterState2.metaData().index("test")); + + ClusterState serializedClusterState2 = updateUsingSerialisedDiff(serializedClusterState1, clusterState2.diff(clusterState1)); + assertSame("Unchanged metadata should not create new IndexMetaData objects", + serializedClusterState1.metaData().index("test"), serializedClusterState2.metaData().index("test")); + assertSame("Unchanged routing table should not create new IndexRoutingTable objects", + serializedClusterState1.routingTable().index("test"), serializedClusterState2.routingTable().index("test")); + + // Create a new and different IndexMetadata object + ClusterState clusterState3 = ClusterState.builder(clusterState1).incrementVersion() + .metaData(MetaData.builder().put(IndexMetaData.builder(indexMetaData).numberOfReplicas(2).build(), true)).build(); + ClusterState serializedClusterState3 = updateUsingSerialisedDiff(serializedClusterState2, clusterState3.diff(clusterState2)); + assertNotEquals("Should have a new IndexMetaData object", + serializedClusterState2.metaData().index("test"), serializedClusterState3.metaData().index("test")); + assertSame("Unchanged routing table should not create new IndexRoutingTable objects", + serializedClusterState2.routingTable().index("test"), serializedClusterState3.routingTable().index("test")); + + assertSame("nodes", serializedClusterState2.nodes(), serializedClusterState3.nodes()); + assertSame("blocks", serializedClusterState2.blocks(), serializedClusterState3.blocks()); + assertSame("template", serializedClusterState2.metaData().templates().get("test-template"), + serializedClusterState3.metaData().templates().get("test-template")); + } } From 9d468af9c29ac2e884816393f0d1a65ae39d0615 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 9 Nov 2017 10:45:44 +0000 Subject: [PATCH 15/19] Improve error message for parse failures of completion fields (#27297) Fix spacing/grammar/punctuation, and include the field name and location in the source document. 
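(Editorial note, not part of the patch: the old message also lacked a space before the token name and gave no hint of the field or location. Side by side, using the field name from the new test below:)

// before: new ElasticsearchParseException("failed to parse expected text or object got" + token.name())
//   -> "failed to parse expected text or object gotVALUE_NUMBER"
// after:  new ParsingException(parser.getTokenLocation(),
//             "failed to parse [" + parser.currentName() + "]: expected text or object, but got " + token.name())
//   -> "failed to parse [completion]: expected text or object, but got VALUE_NUMBER", with line/column attached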
--- .../index/mapper/CompletionFieldMapper.java | 4 ++-- .../mapper/CompletionFieldMapperTests.java | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 1c92150676c19..186334c85cb33 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -30,9 +30,9 @@ import org.apache.lucene.search.suggest.document.PrefixCompletionQuery; import org.apache.lucene.search.suggest.document.RegexCompletionQuery; import org.apache.lucene.search.suggest.document.SuggestField; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.set.Sets; @@ -560,7 +560,7 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser } } } else { - throw new ElasticsearchParseException("failed to parse expected text or object got" + token.name()); + throw new ParsingException(parser.getTokenLocation(), "failed to parse [" + parser.currentName() + "]: expected text or object, but got " + token.name()); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 5da524b69c0e6..74183ae864a60 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -163,6 +163,25 @@ public void testParsingMinimal() throws Exception { assertSuggestFields(fields, 1); } + public void testParsingFailure() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("completion") + .field("type", "completion") + .endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + + MapperParsingException e = expectThrows(MapperParsingException.class, () -> + defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder() + .startObject() + .field("completion", 1.0) + .endObject() + .bytes(), + XContentType.JSON))); + assertEquals("failed to parse [completion]: expected text or object, but got VALUE_NUMBER", e.getCause().getMessage()); + } + public void testParsingMultiValued() throws Exception { String mapping = jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("completion") From b866e07f45433925fdd4ae042cb9592930324494 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 9 Nov 2017 13:25:51 +0100 Subject: [PATCH 16/19] Update to AWS SDK 1.11.223 (#27278) --- docs/plugins/repository-s3.asciidoc | 2 +- plugins/repository-s3/build.gradle | 11 ++++++++++- .../licenses/aws-java-sdk-core-1.10.69.jar.sha1 | 1 - .../aws-java-sdk-core-1.11.223.jar.sha1 | 1 + .../licenses/aws-java-sdk-kms-1.10.69.jar.sha1 | 1 - .../licenses/aws-java-sdk-kms-1.11.223.jar.sha1 | 1 + .../licenses/aws-java-sdk-s3-1.10.69.jar.sha1 | 1 - 
.../licenses/aws-java-sdk-s3-1.11.223.jar.sha1 | 1 + .../repositories/s3/AmazonS3Wrapper.java | 4 ++-- .../repositories/s3/AwsS3ServiceImplTests.java | 17 ++++++++--------- 10 files changed, 24 insertions(+), 16 deletions(-) delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-core-1.10.69.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-kms-1.10.69.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-s3-1.10.69.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 8191b00f9d92d..cb7cc67ddbce9 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -111,7 +111,7 @@ are marked as `Secure`. `use_throttle_retries`:: - Whether retries should be throttled (ie use backoff). Must be `true` or `false`. Defaults to `false`. + Whether retries should be throttled (ie use backoff). Must be `true` or `false`. Defaults to `true`. [[repository-s3-repository]] ==== Repository Settings diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index e8c7326dee97b..7071307fbc3c2 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'aws': '1.10.69' + 'aws': '1.11.223' ] dependencies { @@ -76,6 +76,15 @@ thirdPartyAudit.excludes = [ 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', + 'software.amazon.ion.IonReader', + 'software.amazon.ion.IonSystem', + 'software.amazon.ion.IonType', + 'software.amazon.ion.IonWriter', + 'software.amazon.ion.Timestamp', + 'software.amazon.ion.system.IonBinaryWriterBuilder', + 'software.amazon.ion.system.IonSystemBuilder', + 'software.amazon.ion.system.IonTextWriterBuilder', + 'software.amazon.ion.system.IonWriterBuilder', ] // jarhell with jdk (intentionally, because jaxb was removed from default modules in java 9) diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.10.69.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.10.69.jar.sha1 deleted file mode 100644 index 2971a33d7d91b..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-core-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1f02d5f26ba1d8c37e2bf9c847db3c6729dda00 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 new file mode 100644 index 0000000000000..9890dd8d600b3 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 @@ -0,0 +1 @@ +c3993cb44f5856fa721b7b7ccfc266377c0bf9c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.10.69.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.10.69.jar.sha1 deleted file mode 100644 index 22e1f924664fa..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed74ff3872193b4704a751f0b72ab2cf0db0651b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 new file mode 100644 index 0000000000000..d5bc9d30308dc --- /dev/null +++ 
b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 @@ -0,0 +1 @@ +c24e6ebe108c60a08098aeaad5ae0b6a5a77b618 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.10.69.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.10.69.jar.sha1 deleted file mode 100644 index 64e7336c2bc91..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6fa48bf0bff43f26436956b88d8d3764b6cf109e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 new file mode 100644 index 0000000000000..fe12b2d4847fa --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 @@ -0,0 +1 @@ +c2ef96732e22d97952fbcd0a94f1dc376d157eda \ No newline at end of file diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java index 2ce64d5c6fc83..bcab130e7d531 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java @@ -24,6 +24,7 @@ import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.HttpMethod; import com.amazonaws.regions.Region; +import com.amazonaws.services.s3.AbstractAmazonS3; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.S3ClientOptions; import com.amazonaws.services.s3.S3ResponseMetadata; @@ -118,7 +119,7 @@ import java.util.List; @SuppressForbidden(reason = "implements AWS api that uses java.io.File!") -public class AmazonS3Wrapper implements AmazonS3 { +public class AmazonS3Wrapper extends AbstractAmazonS3 { protected AmazonS3 delegate; @@ -126,7 +127,6 @@ public AmazonS3Wrapper(AmazonS3 delegate) { this.delegate = delegate; } - @Override public void setEndpoint(String endpoint) { delegate.setEndpoint(endpoint); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 9863402ec37c1..f85f2eb6f322f 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -95,8 +95,8 @@ private void assertCredentials(Settings singleRepositorySettings, Settings setti } public void testAWSDefaultConfiguration() { - launchAWSConfigurationTest(Settings.EMPTY, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, false, - ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); + launchAWSConfigurationTest(Settings.EMPTY, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, + ClientConfiguration.DEFAULT_THROTTLE_RETRIES, ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); } public void testAWSConfigurationWithAwsSettings() { @@ -111,7 +111,7 @@ public void testAWSConfigurationWithAwsSettings() { .put("s3.client.default.read_timeout", "10s") .build(); launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", - "aws_proxy_password", 3, false, 10000); + "aws_proxy_password", 3, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 10000); } public void testRepositoryMaxRetries() { @@ -119,15 +119,14 @@ public void 
testRepositoryMaxRetries() { .put("s3.client.default.max_retries", 5) .build(); launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null, - null, 5, false, 50000); + null, 5, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 50000); } public void testRepositoryThrottleRetries() { - Settings settings = Settings.builder() - .put("s3.client.default.use_throttle_retries", true) - .build(); - launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null, - null, 3, true, 50000); + final boolean throttling = randomBoolean(); + + Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); + launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); } private void launchAWSConfigurationTest(Settings settings, From e09189f1c4be05d8706609205cf04860320cdd82 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 9 Nov 2017 15:15:17 +0100 Subject: [PATCH 17/19] Increase logging on qa:mixed-cluster tests Hopefully helps to figure out why the nodes have trouble starting up. --- qa/mixed-cluster/build.gradle | 3 +++ 1 file changed, 3 insertions(+) diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 781a69684e5d4..59a6dfece5278 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -45,6 +45,9 @@ for (Version version : wireCompatVersions) { numNodes = 4 numBwcNodes = 2 bwcVersion = version + + setting 'logger.level', 'DEBUG' + setting 'logger.org.elasticsearch.discovery', 'TRACE' } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { From 15a890b71f93206bc10fdd7e1662fd7f9a0edaae Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 9 Nov 2017 09:45:32 -0800 Subject: [PATCH 18/19] Introduce templating support to timezone/locale in DateProcessor (#27089) Sometimes systems like Beats would want to extract the date's timezone and/or locale from a value in a field of the document. This PR adds support for mustache templating to extract these values. Closes #24024. --- docs/reference/ingest/ingest-node.asciidoc | 24 +++++++ .../ingest/common/DateProcessor.java | 49 +++++++++---- .../ingest/common/IngestCommonPlugin.java | 2 +- .../common/DateProcessorFactoryTests.java | 68 ++++--------------- .../ingest/common/DateProcessorTests.java | 64 ++++++++++++++--- 5 files changed, 128 insertions(+), 79 deletions(-) diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 74cfabbff47a1..720a180934324 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -852,6 +852,30 @@ Here is an example that adds the parsed date to the `timestamp` field based on t -------------------------------------------------- // NOTCONSOLE +The `timezone` and `locale` processor parameters are templated. This means that their values can be +extracted from fields within documents. The example below shows how to extract the locale/timezone +details from existing fields, `my_timezone` and `my_locale`, in the ingested document that contain +the timezone and locale values. 
+ +[source,js] +-------------------------------------------------- +{ + "description" : "...", + "processors" : [ + { + "date" : { + "field" : "initial_date", + "target_field" : "timestamp", + "formats" : ["ISO8601"], + "timezone" : "{{ my_timezone }}", + "locale" : "{{ my_locale }}" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + [[date-index-name-processor]] === Date Index Name Processor diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index f1e7dcdcf55b0..4a9654f8cd0fe 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -20,11 +20,14 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.TemplateScript; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.ISODateTimeFormat; @@ -40,14 +43,15 @@ public final class DateProcessor extends AbstractProcessor { public static final String TYPE = "date"; static final String DEFAULT_TARGET_FIELD = "@timestamp"; - private final DateTimeZone timezone; - private final Locale locale; + private final TemplateScript.Factory timezone; + private final TemplateScript.Factory locale; private final String field; private final String targetField; private final List formats; - private final List> dateParsers; + private final List, Function>> dateParsers; - DateProcessor(String tag, DateTimeZone timezone, Locale locale, String field, List formats, String targetField) { + DateProcessor(String tag, @Nullable TemplateScript.Factory timezone, @Nullable TemplateScript.Factory locale, + String field, List formats, String targetField) { super(tag); this.timezone = timezone; this.locale = locale; @@ -57,10 +61,18 @@ public final class DateProcessor extends AbstractProcessor { this.dateParsers = new ArrayList<>(this.formats.size()); for (String format : formats) { DateFormat dateFormat = DateFormat.fromString(format); - dateParsers.add(dateFormat.getFunction(format, timezone, locale)); + dateParsers.add((params) -> dateFormat.getFunction(format, newDateTimeZone(params), newLocale(params))); } } + private DateTimeZone newDateTimeZone(Map params) { + return timezone == null ? DateTimeZone.UTC : DateTimeZone.forID(timezone.newInstance(params).execute()); + } + + private Locale newLocale(Map params) { + return (locale == null) ? 
Locale.ROOT : LocaleUtils.parse(locale.newInstance(params).execute()); + } + @Override public void execute(IngestDocument ingestDocument) { Object obj = ingestDocument.getFieldValue(field, Object.class); @@ -72,9 +84,9 @@ public void execute(IngestDocument ingestDocument) { DateTime dateTime = null; Exception lastException = null; - for (Function dateParser : dateParsers) { + for (Function, Function> dateParser : dateParsers) { try { - dateTime = dateParser.apply(value); + dateTime = dateParser.apply(ingestDocument.getSourceAndMetadata()).apply(value); } catch (Exception e) { //try the next parser and keep track of the exceptions lastException = ExceptionsHelper.useOrSuppress(lastException, e); @@ -93,11 +105,11 @@ public String getType() { return TYPE; } - DateTimeZone getTimezone() { + TemplateScript.Factory getTimezone() { return timezone; } - Locale getLocale() { + TemplateScript.Factory getLocale() { return locale; } @@ -115,19 +127,30 @@ List getFormats() { public static final class Factory implements Processor.Factory { + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + public DateProcessor create(Map registry, String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", DEFAULT_TARGET_FIELD); String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "timezone"); - DateTimeZone timezone = timezoneString == null ? DateTimeZone.UTC : DateTimeZone.forID(timezoneString); + TemplateScript.Factory compiledTimezoneTemplate = null; + if (timezoneString != null) { + compiledTimezoneTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, + "timezone", timezoneString, scriptService); + } String localeString = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "locale"); - Locale locale = Locale.ROOT; + TemplateScript.Factory compiledLocaleTemplate = null; if (localeString != null) { - locale = LocaleUtils.parse(localeString); + compiledLocaleTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, + "locale", localeString, scriptService); } List formats = ConfigurationUtils.readList(TYPE, processorTag, config, "formats"); - return new DateProcessor(processorTag, timezone, locale, field, formats, targetField); + return new DateProcessor(processorTag, compiledTimezoneTemplate, compiledLocaleTemplate, field, formats, targetField); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index 245ddc32c071d..0182e290d72b4 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -70,7 +70,7 @@ public IngestCommonPlugin() throws IOException { @Override public Map getProcessors(Processor.Parameters parameters) { Map processors = new HashMap<>(); - processors.put(DateProcessor.TYPE, new DateProcessor.Factory()); + processors.put(DateProcessor.TYPE, new DateProcessor.Factory(parameters.scriptService)); processors.put(SetProcessor.TYPE, new SetProcessor.Factory(parameters.scriptService)); processors.put(AppendProcessor.TYPE, new 
AppendProcessor.Factory(parameters.scriptService)); processors.put(RenameProcessor.TYPE, new RenameProcessor.Factory()); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java index f722f658bd1ff..2cf11f6d215d0 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java @@ -20,8 +20,10 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -34,8 +36,14 @@ public class DateProcessorFactoryTests extends ESTestCase { + private DateProcessor.Factory factory; + + @Before + public void init() { + factory = new DateProcessor.Factory(TestTemplateService.instance()); + } + public void testBuildDefaults() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); @@ -46,12 +54,11 @@ public void testBuildDefaults() throws Exception { assertThat(processor.getField(), equalTo(sourceField)); assertThat(processor.getTargetField(), equalTo(DateProcessor.DEFAULT_TARGET_FIELD)); assertThat(processor.getFormats(), equalTo(Collections.singletonList("dd/MM/yyyyy"))); - assertThat(processor.getLocale(), equalTo(Locale.ROOT)); - assertThat(processor.getTimezone(), equalTo(DateTimeZone.UTC)); + assertNull(processor.getLocale()); + assertNull(processor.getTimezone()); } public void testMatchFieldIsMandatory() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String targetField = randomAlphaOfLengthBetween(1, 10); config.put("target_field", targetField); @@ -66,7 +73,6 @@ public void testMatchFieldIsMandatory() throws Exception { } public void testMatchFormatsIsMandatory() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); String targetField = randomAlphaOfLengthBetween(1, 10); @@ -82,7 +88,6 @@ public void testMatchFormatsIsMandatory() throws Exception { } public void testParseLocale() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); @@ -91,39 +96,10 @@ public void testParseLocale() throws Exception { config.put("locale", locale.toLanguageTag()); DateProcessor processor = factory.create(null, null, config); - assertThat(processor.getLocale().toLanguageTag(), equalTo(locale.toLanguageTag())); - } - - public void testParseInvalidLocale() throws Exception { - String[] locales = new String[] { "invalid_locale", "english", "xy", "xy-US" }; - for (String locale : locales) { - DateProcessor.Factory factory = new DateProcessor.Factory(); - Map config = new HashMap<>(); - String sourceField = randomAlphaOfLengthBetween(1, 10); - config.put("field", sourceField); - config.put("formats", Collections.singletonList("dd/MM/yyyyy")); - config.put("locale", locale); - IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, - () -> factory.create(null, null, config)); - assertThat(e.getMessage(), equalTo("Unknown language: " + locale.split("[_-]")[0])); - } - - locales = new String[] { "en-XY", "en-Canada" }; - for (String locale : locales) { - DateProcessor.Factory factory = new DateProcessor.Factory(); - Map config = new HashMap<>(); - String sourceField = randomAlphaOfLengthBetween(1, 10); - config.put("field", sourceField); - config.put("formats", Collections.singletonList("dd/MM/yyyyy")); - config.put("locale", locale); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> factory.create(null, null, config)); - assertThat(e.getMessage(), equalTo("Unknown country: " + locale.split("[_-]")[1])); - } + assertThat(processor.getLocale().newInstance(Collections.emptyMap()).execute(), equalTo(locale.toLanguageTag())); } public void testParseTimezone() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); @@ -132,26 +108,10 @@ public void testParseTimezone() throws Exception { DateTimeZone timezone = randomDateTimeZone(); config.put("timezone", timezone.getID()); DateProcessor processor = factory.create(null, null, config); - assertThat(processor.getTimezone(), equalTo(timezone)); - } - - public void testParseInvalidTimezone() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); - Map config = new HashMap<>(); - String sourceField = randomAlphaOfLengthBetween(1, 10); - config.put("field", sourceField); - config.put("match_formats", Collections.singletonList("dd/MM/yyyyy")); - config.put("timezone", "invalid_timezone"); - try { - factory.create(null, null, config); - fail("invalid timezone should fail"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("The datetime zone id 'invalid_timezone' is not recognised")); - } + assertThat(processor.getTimezone().newInstance(Collections.emptyMap()).execute(), equalTo(timezone.getID())); } public void testParseMatchFormats() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); @@ -162,7 +122,6 @@ public void testParseMatchFormats() throws Exception { } public void testParseMatchFormatsFailure() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); @@ -177,7 +136,6 @@ public void testParseMatchFormatsFailure() throws Exception { } public void testParseTargetField() throws Exception { - DateProcessor.Factory factory = new DateProcessor.Factory(); Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); String targetField = randomAlphaOfLengthBetween(1, 10); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index cc68340ec59f4..8fba759aa16f9 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -21,6 +21,8 @@ import org.elasticsearch.ingest.IngestDocument; import 
org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.ingest.TestTemplateService; +import org.elasticsearch.script.TemplateScript; import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -34,11 +36,19 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.joda.time.DateTimeZone.UTC; public class DateProcessorTests extends ESTestCase { + private TemplateScript.Factory templatize(Locale locale) { + return new TestTemplateService.MockTemplateScript.Factory(locale.getLanguage()); + } + private TemplateScript.Factory templatize(DateTimeZone timezone) { + return new TestTemplateService.MockTemplateScript.Factory(timezone.getID()); + } public void testJodaPattern() { - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.forID("Europe/Amsterdam"), Locale.ENGLISH, + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), + templatize(DateTimeZone.forID("Europe/Amsterdam")), templatize(Locale.ENGLISH), "date_as_string", Collections.singletonList("yyyy dd MM hh:mm:ss"), "date_as_date"); Map document = new HashMap<>(); document.put("date_as_string", "2010 12 06 11:05:15"); @@ -52,7 +62,8 @@ public void testJodaPatternMultipleFormats() { matchFormats.add("yyyy dd MM"); matchFormats.add("dd/MM/yyyy"); matchFormats.add("dd-MM-yyyy"); - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.forID("Europe/Amsterdam"), Locale.ENGLISH, + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), + templatize(DateTimeZone.forID("Europe/Amsterdam")), templatize(Locale.ENGLISH), "date_as_string", matchFormats, "date_as_date"); Map document = new HashMap<>(); @@ -86,16 +97,22 @@ public void testJodaPatternMultipleFormats() { public void testInvalidJodaPattern() { try { - new DateProcessor(randomAlphaOfLength(10), DateTimeZone.UTC, randomLocale(random()), + DateProcessor processor = new DateProcessor(randomAlphaOfLength(10), + templatize(UTC), templatize(randomLocale(random())), "date_as_string", Collections.singletonList("invalid pattern"), "date_as_date"); - fail("date processor initialization should have failed"); + Map document = new HashMap<>(); + document.put("date_as_string", "2010"); + processor.execute(RandomDocumentPicks.randomIngestDocument(random(), document)); + fail("date processor execution should have failed"); } catch(IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Illegal pattern component: i")); + assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); + assertThat(e.getCause().getMessage(), equalTo("Illegal pattern component: i")); } } public void testJodaPatternLocale() { - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.forID("Europe/Amsterdam"), Locale.ITALIAN, + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), + templatize(DateTimeZone.forID("Europe/Amsterdam")), templatize(Locale.ITALIAN), "date_as_string", Collections.singletonList("yyyy dd MMM"), "date_as_date"); Map document = new HashMap<>(); document.put("date_as_string", "2010 12 giugno"); @@ -105,7 +122,8 @@ public void testJodaPatternLocale() { } public void testJodaPatternDefaultYear() { - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.forID("Europe/Amsterdam"), Locale.ENGLISH, + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), + 
templatize(DateTimeZone.forID("Europe/Amsterdam")), templatize(Locale.ENGLISH), "date_as_string", Collections.singletonList("dd/MM"), "date_as_date"); Map document = new HashMap<>(); document.put("date_as_string", "12/06"); @@ -116,7 +134,8 @@ public void testJodaPatternDefaultYear() { } public void testTAI64N() { - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.forOffsetHours(2), randomLocale(random()), + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), templatize(DateTimeZone.forOffsetHours(2)), + templatize(randomLocale(random())), "date_as_string", Collections.singletonList("TAI64N"), "date_as_date"); Map document = new HashMap<>(); String dateAsString = (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024"; @@ -127,7 +146,7 @@ public void testTAI64N() { } public void testUnixMs() { - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.UTC, randomLocale(random()), + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), templatize(UTC), templatize(randomLocale(random())), "date_as_string", Collections.singletonList("UNIX_MS"), "date_as_date"); Map document = new HashMap<>(); document.put("date_as_string", "1000500"); @@ -143,7 +162,8 @@ public void testUnixMs() { } public void testUnix() { - DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), DateTimeZone.UTC, randomLocale(random()), + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), templatize(UTC), + templatize(randomLocale(random())), "date_as_string", Collections.singletonList("UNIX"), "date_as_date"); Map document = new HashMap<>(); document.put("date_as_string", "1000.5"); @@ -151,4 +171,28 @@ public void testUnix() { dateProcessor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("1970-01-01T00:16:40.500Z")); } + + public void testInvalidTimezone() { + DateProcessor processor = new DateProcessor(randomAlphaOfLength(10), + new TestTemplateService.MockTemplateScript.Factory("invalid_timezone"), templatize(randomLocale(random())), + "date_as_string", Collections.singletonList("yyyy"), "date_as_date"); + Map document = new HashMap<>(); + document.put("date_as_string", "2010"); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> processor.execute(RandomDocumentPicks.randomIngestDocument(random(), document))); + assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); + assertThat(e.getCause().getMessage(), equalTo("The datetime zone id 'invalid_timezone' is not recognised")); + } + + public void testInvalidLocale() { + DateProcessor processor = new DateProcessor(randomAlphaOfLength(10), + templatize(UTC), new TestTemplateService.MockTemplateScript.Factory("invalid_locale"), + "date_as_string", Collections.singletonList("yyyy"), "date_as_date"); + Map document = new HashMap<>(); + document.put("date_as_string", "2010"); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> processor.execute(RandomDocumentPicks.randomIngestDocument(random(), document))); + assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); + assertThat(e.getCause().getMessage(), equalTo("Unknown language: invalid")); + } } From 0a50fca0ee1fca0c84aeba5815e3e3f98c0aa58e Mon Sep 17 00:00:00 2001 From: Guillaume Le Floch Date: Thu, 9 Nov 2017 19:16:44 +0100 Subject: [PATCH 19/19] Update Tika version to 1.15 This commit upgrades the Tika dependency to version 1.15. 
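
Along with Tika itself, the transitive dependencies move as well: POI goes
from 3.15 to 3.16 and commons-compress from 1.10 to 1.14, while xz 1.6,
slf4j-api 1.6.2, and commons-collections4 4.1 become explicit compile
dependencies, each with its own license, notice, and sha1 files. TikaImpl's
exclusion list also changes from the single x-tika-ooxml media type to a set
of Visio media types.

As a quick illustration of that last change (a sketch only, not part of this
patch: the class and test names are hypothetical, and it assumes JUnit 4 and
Tika's org.apache.tika.mime.MediaType are on the classpath), the new
exclusion set can be exercised like this:

    import static org.junit.Assert.assertFalse;
    import static org.junit.Assert.assertTrue;

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.tika.mime.MediaType;
    import org.junit.Test;

    public class VisioExcludesSketch {

        // Mirrors the EXCLUDES set introduced in TikaImpl by this patch.
        private static final Set<MediaType> EXCLUDES = new HashSet<>(Arrays.asList(
            MediaType.application("vnd.ms-visio.drawing"),
            MediaType.application("vnd.ms-visio.drawing.macroenabled.12"),
            MediaType.application("vnd.ms-visio.stencil"),
            MediaType.application("vnd.ms-visio.stencil.macroenabled.12"),
            MediaType.application("vnd.ms-visio.template"),
            MediaType.application("vnd.ms-visio.template.macroenabled.12")
        ));

        @Test
        public void visioTypesAreExcludedButGenericOoxmlIsNot() {
            // Visio documents are skipped by the attachment processor...
            assertTrue(EXCLUDES.contains(MediaType.application("vnd.ms-visio.drawing")));
            // ...while the generic OOXML type excluded before this patch is parsed again.
            assertFalse(EXCLUDES.contains(MediaType.application("x-tika-ooxml")));
        }
    }

Keeping EXCLUDES a HashSet makes the per-document media type check a
constant-time lookup however many formats end up on the list, and it means
duplicate entries collapse silently.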
Relates #25003 --- plugins/ingest-attachment/build.gradle | 55 +++-- .../commons-collections4-4.1.jar.sha1 | 1 + .../licenses/commons-collections4-LICENSE.txt | 201 ++++++++++++++++++ .../licenses/commons-collections4-NOTICE.txt | 5 + .../licenses/commons-compress-1.10.jar.sha1 | 1 - .../licenses/commons-compress-1.14.jar.sha1 | 1 + .../licenses/poi-3.15.jar.sha1 | 1 - .../licenses/poi-3.16.jar.sha1 | 1 + .../licenses/poi-ooxml-3.15.jar.sha1 | 1 - .../licenses/poi-ooxml-3.16.jar.sha1 | 1 + .../licenses/poi-ooxml-schemas-3.15.jar.sha1 | 1 - .../licenses/poi-ooxml-schemas-3.16.jar.sha1 | 1 + .../licenses/poi-scratchpad-3.15.jar.sha1 | 1 - .../licenses/poi-scratchpad-3.16.jar.sha1 | 1 + .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 + .../licenses/slf4j-api-LICENSE.txt | 21 ++ .../licenses/slf4j-api-NOTICE.txt | 0 .../licenses/tika-core-1.14.jar.sha1 | 1 - .../licenses/tika-core-1.15.jar.sha1 | 1 + .../licenses/tika-parsers-1.14.jar.sha1 | 1 - .../licenses/tika-parsers-1.15.jar.sha1 | 1 + .../licenses/xz-1.6.jar.sha1 | 1 + .../ingest-attachment/licenses/xz-LICENSE.txt | 9 + .../ingest-attachment/licenses/xz-NOTICE.txt | 1 + .../ingest/attachment/TikaImpl.java | 14 +- 25 files changed, 284 insertions(+), 39 deletions(-) create mode 100644 plugins/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-collections4-LICENSE.txt create mode 100644 plugins/ingest-attachment/licenses/commons-collections4-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/commons-compress-1.10.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-3.16.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-3.16.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.16.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-scratchpad-3.16.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt create mode 100644 plugins/ingest-attachment/licenses/slf4j-api-NOTICE.txt delete mode 100644 plugins/ingest-attachment/licenses/tika-core-1.14.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-core-1.15.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parsers-1.14.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parsers-1.15.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/xz-LICENSE.txt create mode 100644 plugins/ingest-attachment/licenses/xz-NOTICE.txt diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index bdc01c0151e9e..b79501966e31d 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -23,10 +23,10 @@ esplugin { } versions << [ - 'tika': '1.14', + 'tika': '1.15', 'pdfbox': '2.0.3', 'bouncycastle': '1.55', - 'poi': '3.15', + 'poi': '3.16', 'mime4j': '0.7.2' ] @@ -34,7 +34,9 @@ dependencies { // mandatory for tika compile 
"org.apache.tika:tika-core:${versions.tika}" compile "org.apache.tika:tika-parsers:${versions.tika}" + compile 'org.tukaani:xz:1.6' compile 'commons-io:commons-io:2.4' + compile "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection compile 'com.googlecode.juniversalchardet:juniversalchardet:1.0.3' @@ -56,10 +58,11 @@ dependencies { compile "org.apache.poi:poi-ooxml-schemas:${versions.poi}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile 'org.apache.xmlbeans:xmlbeans:2.6.0' + compile 'org.apache.commons:commons-collections4:4.1' // MS Office compile "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork - compile 'org.apache.commons:commons-compress:1.10' + compile 'org.apache.commons:commons-compress:1.14' // Outlook documents compile "org.apache.james:apache-mime4j-core:${versions.mime4j}" compile "org.apache.james:apache-mime4j-dom:${versions.mime4j}" @@ -317,8 +320,6 @@ thirdPartyAudit.excludes = [ 'com.microsoft.schemas.office.powerpoint.CTRel', 'com.microsoft.schemas.office.visio.x2012.main.AttachedToolbarsType', 'com.microsoft.schemas.office.visio.x2012.main.ColorsType', - 'com.microsoft.schemas.office.visio.x2012.main.ConnectType', - 'com.microsoft.schemas.office.visio.x2012.main.ConnectsType', 'com.microsoft.schemas.office.visio.x2012.main.CpType', 'com.microsoft.schemas.office.visio.x2012.main.CustomMenusFileType', 'com.microsoft.schemas.office.visio.x2012.main.CustomToolbarsFileType', @@ -346,6 +347,7 @@ thirdPartyAudit.excludes = [ 'com.microsoft.schemas.office.visio.x2012.main.TpType', 'com.microsoft.schemas.office.visio.x2012.main.TriggerType', 'com.microsoft.schemas.office.visio.x2012.main.impl.CellTypeImpl$1RefByList', + 'com.microsoft.schemas.office.visio.x2012.main.impl.ConnectsTypeImpl$1ConnectList', 'com.microsoft.schemas.office.visio.x2012.main.impl.MastersTypeImpl$1MasterList', 'com.microsoft.schemas.office.visio.x2012.main.impl.MastersTypeImpl$1MasterShortcutList', 'com.microsoft.schemas.office.visio.x2012.main.impl.PagesTypeImpl$1PageList', @@ -481,6 +483,7 @@ thirdPartyAudit.excludes = [ 'com.pff.PSTFile', 'com.pff.PSTFolder', 'com.pff.PSTMessage', + 'com.pff.PSTRecipient', 'com.rometools.rome.feed.synd.SyndContent', 'com.rometools.rome.feed.synd.SyndEntry', 'com.rometools.rome.feed.synd.SyndFeed', @@ -511,13 +514,14 @@ thirdPartyAudit.excludes = [ 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', 'javax.ws.rs.core.Response', + 'javax.ws.rs.core.UriBuilder', 'junit.framework.TestCase', 'opennlp.tools.namefind.NameFinderME', 'opennlp.tools.namefind.TokenNameFinderModel', + 'opennlp.tools.sentiment.SentimentME', + 'opennlp.tools.sentiment.SentimentModel', 'opennlp.tools.util.Span', 'org.apache.avalon.framework.logger.Logger', - 'org.apache.commons.collections4.ListValuedMap', - 'org.apache.commons.collections4.multimap.ArrayListValuedHashMap', 'org.apache.commons.csv.CSVFormat', 'org.apache.commons.csv.CSVParser', 'org.apache.commons.csv.CSVRecord', @@ -526,6 +530,7 @@ thirdPartyAudit.excludes = [ 'org.apache.commons.exec.ExecuteWatchdog', 'org.apache.commons.exec.PumpStreamHandler', 'org.apache.commons.exec.environment.EnvironmentUtils', + 'org.apache.commons.lang.StringUtils', 'org.apache.ctakes.typesystem.type.refsem.UmlsConcept', 'org.apache.ctakes.typesystem.type.textsem.IdentifiedAnnotation', 'org.apache.cxf.jaxrs.client.WebClient', @@ -584,6 +589,7 @@ thirdPartyAudit.excludes = [ 'org.apache.xml.security.Init', 'org.apache.xml.security.c14n.Canonicalizer', 
'org.apache.xml.security.utils.Base64', + 'org.brotli.dec.BrotliInputStream', 'org.etsi.uri.x01903.v13.AnyType', 'org.etsi.uri.x01903.v13.ClaimedRolesListType', 'org.etsi.uri.x01903.v13.CounterSignatureType', @@ -625,12 +631,15 @@ thirdPartyAudit.excludes = [ 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1RevocationValuesList', 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SigAndRefsTimeStampList', 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SignatureTimeStampList', + 'org.etsi.uri.x01903.v14.ValidationDataType$Factory', + 'org.etsi.uri.x01903.v14.ValidationDataType', 'org.json.JSONArray', 'org.json.JSONObject', 'org.json.XML', 'org.json.simple.JSONArray', 'org.json.simple.JSONObject', 'org.json.simple.parser.JSONParser', + 'org.junit.Assert', 'org.junit.Test', 'org.junit.internal.TextListener', 'org.junit.runner.JUnitCore', @@ -801,6 +810,7 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.drawingml.x2006.main.CTSupplementalFont', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTableBackgroundStyle', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTableCellBorderStyle', + 'org.openxmlformats.schemas.drawingml.x2006.main.CTTableStyleTextStyle', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBlipBullet', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletColorFollowText', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletSizeFollowText', @@ -815,6 +825,7 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.drawingml.x2006.main.STBlackWhiteMode', 'org.openxmlformats.schemas.drawingml.x2006.main.STBlipCompression', 'org.openxmlformats.schemas.drawingml.x2006.main.STFixedAngle', + 'org.openxmlformats.schemas.drawingml.x2006.main.STOnOffStyleType$Enum', 'org.openxmlformats.schemas.drawingml.x2006.main.STPanose', 'org.openxmlformats.schemas.drawingml.x2006.main.STPathFillMode', 'org.openxmlformats.schemas.drawingml.x2006.main.STPresetPatternVal', @@ -1202,11 +1213,8 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTMissing', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTNumber', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleLink', - 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleObjects', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleSize', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPCDKPIs', - 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPageField', - 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPageFields', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPhoneticRun', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotFilters', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotHierarchies', @@ -1231,9 +1239,11 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTX', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STCellSpans', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STDataValidationImeMode', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STDvAspect', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STFieldSortType', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STGuid', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STObjects', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STOleUpdate', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STPhoneticAlignment', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.STPhoneticType', 
'org.openxmlformats.schemas.spreadsheetml.x2006.main.STPrintError', @@ -1291,7 +1301,9 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTMapInfoImpl$1SchemaList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTMergeCellsImpl$1MergeCellList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTNumFmtsImpl$1NumFmtList', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTOleObjectsImpl$1OleObjectList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPageBreakImpl$1BrkList', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPageFieldsImpl$1PageFieldList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotCacheRecordsImpl$1RList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotCachesImpl$1PivotCacheList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotFieldsImpl$1PivotFieldList', @@ -1321,6 +1333,7 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1NList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSharedItemsImpl$1SList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetDataImpl$1RowList', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetsImpl$1SheetList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetViewImpl$1PivotSelectionList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetViewImpl$1SelectionList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTSheetViewsImpl$1SheetViewList', @@ -2035,26 +2048,10 @@ thirdPartyAudit.excludes = [ 'org.osgi.framework.ServiceRegistration', 'org.osgi.util.tracker.ServiceTracker', 'org.osgi.util.tracker.ServiceTrackerCustomizer', - 'org.slf4j.Logger', - 'org.slf4j.LoggerFactory', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', 'org.sqlite.SQLiteConfig', - 'org.tukaani.xz.ARMOptions', - 'org.tukaani.xz.ARMThumbOptions', - 'org.tukaani.xz.DeltaOptions', - 'org.tukaani.xz.FilterOptions', - 'org.tukaani.xz.FinishableWrapperOutputStream', - 'org.tukaani.xz.IA64Options', - 'org.tukaani.xz.LZMA2InputStream', - 'org.tukaani.xz.LZMA2Options', - 'org.tukaani.xz.LZMAInputStream', - 'org.tukaani.xz.PowerPCOptions', - 'org.tukaani.xz.SPARCOptions', - 'org.tukaani.xz.SingleXZInputStream', - 'org.tukaani.xz.UnsupportedOptionsException', - 'org.tukaani.xz.X86Options', - 'org.tukaani.xz.XZ', - 'org.tukaani.xz.XZInputStream', - 'org.tukaani.xz.XZOutputStream', 'org.w3.x2000.x09.xmldsig.KeyInfoType', 'org.w3.x2000.x09.xmldsig.SignatureMethodType', 'org.w3.x2000.x09.xmldsig.SignatureValueType', diff --git a/plugins/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 new file mode 100644 index 0000000000000..f054416580624 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-collections4-4.1.jar.sha1 @@ -0,0 +1 @@ +a4cf4688fe1c7e3a63aa636cc96d013af537768e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-collections4-LICENSE.txt b/plugins/ingest-attachment/licenses/commons-collections4-LICENSE.txt new file mode 100644 index 0000000000000..1796eb428ec8a --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-collections4-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-collections4-NOTICE.txt b/plugins/ingest-attachment/licenses/commons-collections4-NOTICE.txt new file mode 100644 index 0000000000000..688d6160e81f7 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-collections4-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons Collections +Copyright 2001-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.10.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.10.jar.sha1 deleted file mode 100644 index 65c74b9a88f09..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5eeb27c57eece1faf2d837868aeccc94d84dcc9a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 new file mode 100644 index 0000000000000..a93cac2243e69 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 @@ -0,0 +1 @@ +7b18320d668ab080758bf5383d6d8fcf750babce \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 deleted file mode 100644 index 5405d32cd0108..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -965bba8899988008bb2341e300347de62aad5391 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-3.16.jar.sha1 b/plugins/ingest-attachment/licenses/poi-3.16.jar.sha1 new file mode 100644 index 0000000000000..75cbf23384761 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-3.16.jar.sha1 @@ -0,0 +1 @@ +ad21c123ee5d6b5b2a8f0d4ed23b3ffe6759a889 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 deleted file mode 100644 index 4362223dac4a4..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e2800856735b07b8edd417aee07685470216a00f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-3.16.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-3.16.jar.sha1 new file mode 100644 index 0000000000000..c2283c7fa1a36 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-3.16.jar.sha1 @@ -0,0 +1 @@ +76e20fe22404cc4da55ddfdaaaadee32bbfa3bdd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 deleted file mode 100644 index 393e6f885d9f2..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de4a50ca39de48a19606b35644ecadb2f733c479 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.16.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.16.jar.sha1 new file mode 100644 index 0000000000000..8ddbb3001691a --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.16.jar.sha1 @@ -0,0 +1 @@ +9828a49307fc6bebfd42185b677d88b6e4994c63 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 
deleted file mode 100644 index d08f475a3f60a..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1db76ae4a9389fa4339dc3b7f8208aa82c72b04 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-3.16.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-3.16.jar.sha1 new file mode 100644 index 0000000000000..8dc53c0bfbca2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-3.16.jar.sha1 @@ -0,0 +1 @@ +69d6dda524e38a491b362d0f94ef74a514faf70a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 new file mode 100644 index 0000000000000..a2f93ea55802b --- /dev/null +++ b/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 @@ -0,0 +1 @@ +8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..52055e61de46f --- /dev/null +++ b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/plugins/ingest-attachment/licenses/slf4j-api-NOTICE.txt b/plugins/ingest-attachment/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingest-attachment/licenses/tika-core-1.14.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.14.jar.sha1 deleted file mode 100644 index 2da2c07d9b27f..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afff8f1774994aa973ef90bc8d38ddf089b9d6d9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.15.jar.sha1 new file mode 100644 index 0000000000000..cc764669b5bb1 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-1.15.jar.sha1 @@ -0,0 +1 @@ +17850c2224e4e3867e588060dc8ce6ba3bcfab2a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.14.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.14.jar.sha1 deleted file mode 100644 index b96b3cdefdd1a..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d26c10a9e7d116366562aa260013a30a55ff4e8f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.15.jar.sha1 new file mode 100644 index 0000000000000..cada2d9f3ac1b --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-1.15.jar.sha1 @@ -0,0 +1 @@ +aa07c2cda051709e5fe70fd6e244386fc93b0a1e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 new file mode 100644 index 0000000000000..d91cd44c0b4d3 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 @@ -0,0 +1 @@ +05b6f921f1810bdf90e25471968f741f87168b64 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-LICENSE.txt b/plugins/ingest-attachment/licenses/xz-LICENSE.txt new file mode 100644 index 0000000000000..9fde2b1659117 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xz-LICENSE.txt @@ -0,0 +1,9 @@ + +Licensing of XZ for Java +======================== + + All the files in this package have been written by Lasse Collin + and/or Igor Pavlov. All these files have been put into the + public domain. You can do whatever you want with these files. + + This software is provided "as is", without any warranty. 
diff --git a/plugins/ingest-attachment/licenses/xz-NOTICE.txt b/plugins/ingest-attachment/licenses/xz-NOTICE.txt
new file mode 100644
index 0000000000000..d3f5a12faa997
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/xz-NOTICE.txt
@@ -0,0 +1 @@
+
diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java
index 4b8189449cf47..9c3366d160eb9 100644
--- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java
+++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java
@@ -50,10 +50,10 @@
 import java.security.ProtectionDomain;
 import java.security.SecurityPermission;
 import java.util.Arrays;
-import java.util.Collections;
+import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.PropertyPermission;
 import java.util.Set;
 
 /**
  * Runs tika with limited parsers and limited permissions.
@@ -63,7 +63,14 @@ final class TikaImpl {
 
     /** Exclude some formats */
-    private static final Set<MediaType> EXCLUDES = Collections.singleton(MediaType.application("x-tika-ooxml"));
+    private static final Set<MediaType> EXCLUDES = new HashSet<>(Arrays.asList(
+        MediaType.application("vnd.ms-visio.drawing"),
+        MediaType.application("vnd.ms-visio.drawing.macroenabled.12"),
+        MediaType.application("vnd.ms-visio.stencil"),
+        MediaType.application("vnd.ms-visio.stencil.macroenabled.12"),
+        MediaType.application("vnd.ms-visio.template"),
+        MediaType.application("vnd.ms-visio.template.macroenabled.12")
+    ));
 
     /** subset of parsers for types we support */
     private static final Parser PARSERS[] = new Parser[] {