Replace delimited_payload_filter by delimited_payload (#26625)
The `delimited_payload_filter` is renamed to `delimited_payload`; the old name is
deprecated and should be replaced by `delimited_payload`.

Closes #21978
kel authored and cbuescher committed Nov 24, 2017
1 parent e8c2e5d commit 4885acb
Showing 9 changed files with 105 additions and 6 deletions.
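As a quick orientation before the file-by-file diff, here is a minimal sketch of how the renamed filter might be wired into index settings. It is not code from this commit; the `payload_test` and `my_delimited_payload` names are illustrative, echoing the updated test further down, and the `delimiter`/`encoding` values mirror the new REST test at the end of this diff.

import org.elasticsearch.common.settings.Settings;

public class DelimitedPayloadSettingsSketch {
    public static void main(String[] args) {
        // "delimited_payload" is the new name for the deprecated "delimited_payload_filter" type
        Settings settings = Settings.builder()
            .put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
            .putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
            .put("index.analysis.filter.my_delimited_payload.type", "delimited_payload")
            .put("index.analysis.filter.my_delimited_payload.delimiter", "^")
            .put("index.analysis.filter.my_delimited_payload.encoding", "identity")
            .build();
        System.out.println(settings);
    }
}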
@@ -300,6 +300,7 @@ public String toString() {
};
}

@SuppressWarnings("unchecked")
private <T> Map<String, T> buildMapping(Component component, IndexSettings settings, Map<String, Settings> settingsMap,
Map<String, ? extends AnalysisModule.AnalysisProvider<T>> providerMap,
Map<String, ? extends AnalysisModule.AnalysisProvider<T>> defaultInstance) throws IOException {
@@ -152,10 +152,10 @@ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOException
.field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
Settings setting = Settings.builder()
.put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
.putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload_filter")
.put("index.analysis.filter.my_delimited_payload_filter.delimiter", delimiter)
.put("index.analysis.filter.my_delimited_payload_filter.encoding", encodingString)
.put("index.analysis.filter.my_delimited_payload_filter.type", "mock_payload_filter").build();
.putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
.put("index.analysis.filter.my_delimited_payload.delimiter", delimiter)
.put("index.analysis.filter.my_delimited_payload.encoding", encodingString)
.put("index.analysis.filter.my_delimited_payload.type", "mock_payload_filter").build();
createIndex("test", setting, "type1", mapping);

client().prepareIndex("test", "type1", Integer.toString(1))
@@ -1,7 +1,7 @@
[[analysis-delimited-payload-tokenfilter]]
=== Delimited Payload Token Filter

Named `delimited_payload_filter`. Splits tokens into tokens and payload whenever a delimiter character is found.
Named `delimited_payload`. Splits tokens into tokens and payload whenever a delimiter character is found.

Example: "the|1 quick|2 fox|3" is split by default into tokens `the`, `quick`, and `fox` with payloads `1`, `2`, and `3` respectively.

1 change: 1 addition & 0 deletions docs/reference/migration/migrate_7_0.asciidoc
@@ -30,6 +30,7 @@ way to reindex old indices is to use the `reindex` API.
* <<breaking_70_mappings_changes>>
* <<breaking_70_search_changes>>
* <<breaking_70_plugins_changes>>
* <<breaking_70_analysis_changes>>
* <<breaking_70_api_changes>>


8 changes: 8 additions & 0 deletions docs/reference/migration/migrate_7_0/analysis.asciidoc
@@ -0,0 +1,8 @@
[[breaking_70_analysis_changes]]
=== Analysis changes

==== The `delimited_payload_filter` is renamed

The `delimited_payload_filter` is renamed to `delimited_payload`. The old name is
deprecated and will be removed at some point, so it should be replaced by
`delimited_payload`.
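A hedged before/after sketch of what this means for index settings (the `my_payloads` filter name is hypothetical, not from this commit): the old type name still resolves, but for indices created on or after 7.0 it emits the deprecation warning asserted in the REST tests at the end of this diff.

import org.elasticsearch.common.settings.Settings;

public class DelimitedPayloadRenameSketch {
    public static void main(String[] args) {
        // Deprecated spelling: still resolves, but building the analysis for a 7.0+ index logs
        // "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"
        Settings deprecated = Settings.builder()
            .put("index.analysis.filter.my_payloads.type", "delimited_payload_filter")
            .put("index.analysis.filter.my_payloads.delimiter", "^")
            .put("index.analysis.filter.my_payloads.encoding", "identity")
            .build();

        // Preferred spelling going forward
        Settings preferred = Settings.builder()
            .put("index.analysis.filter.my_payloads.type", "delimited_payload")
            .put("index.analysis.filter.my_payloads.delimiter", "^")
            .put("index.analysis.filter.my_payloads.encoding", "identity")
            .build();

        System.out.println(deprecated + "\n" + preferred);
    }
}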
@@ -103,7 +103,8 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
filters.put("czech_stem", CzechStemTokenFilterFactory::new);
filters.put("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new));
filters.put("decimal_digit", DecimalDigitFilterFactory::new);
filters.put("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new);
filters.put("delimited_payload_filter", LegacyDelimitedPayloadTokenFilterFactory::new);
filters.put("delimited_payload", DelimitedPayloadTokenFilterFactory::new);
filters.put("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
filters.put("dutch_stem", DutchStemTokenFilterFactory::new);
filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new);
@@ -195,6 +196,10 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
new DelimitedPayloadTokenFilter(input,
DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER,
DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER)));
filters.add(PreConfiguredTokenFilter.singleton("delimited_payload", false, input ->
new DelimitedPayloadTokenFilter(input,
DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER,
DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER)));
filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer())));
filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input ->
new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE)));
@@ -0,0 +1,39 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.analysis.common;

import org.elasticsearch.Version;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

public class LegacyDelimitedPayloadTokenFilterFactory extends DelimitedPayloadTokenFilterFactory {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(LegacyDelimitedPayloadTokenFilterFactory.class));

LegacyDelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, env, name, settings);
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) {
DEPRECATION_LOGGER.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]");
}
}
}
@@ -170,6 +170,7 @@ protected Map<String, Class<?>> getPreConfiguredTokenFilters() {
filters.put("czech_stem", null);
filters.put("decimal_digit", null);
filters.put("delimited_payload_filter", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class);
filters.put("delimited_payload", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class);
filters.put("dutch_stem", SnowballPorterFilterFactory.class);
filters.put("edge_ngram", null);
filters.put("edgeNGram", null);
@@ -1027,7 +1027,14 @@

---
"delimited_payload_filter":
  - skip:
      version: " - 6.99.99"
      reason: delimited_payload_filter deprecated in 7.0, replaced by delimited_payload
      features: "warnings"

  - do:
      warnings:
        - "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"
      indices.create:
        index: test
        body:
@@ -1039,6 +1046,8 @@
                  delimiter: ^
                  encoding: identity
  - do:
      warnings:
        - "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"
      indices.analyze:
        index: test
        body:
@@ -1050,6 +1059,8 @@

  # Test pre-configured token filter too:
  - do:
      warnings:
        - "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"
      indices.analyze:
        body:
          text: foo|5
@@ -1058,6 +1069,39 @@
  - length: { tokens: 1 }
  - match: { tokens.0.token: foo }

---
"delimited_payload":
  - do:
      indices.create:
        index: test
        body:
          settings:
            analysis:
              filter:
                my_delimited_payload:
                  type: delimited_payload
                  delimiter: ^
                  encoding: identity
  - do:
      indices.analyze:
        index: test
        body:
          text: foo^bar
          tokenizer: keyword
          filter: [my_delimited_payload]
  - length: { tokens: 1 }
  - match: { tokens.0.token: foo }

  # Test pre-configured token filter too:
  - do:
      indices.analyze:
        body:
          text: foo|5
          tokenizer: keyword
          filter: [delimited_payload]
  - length: { tokens: 1 }
  - match: { tokens.0.token: foo }

---
"keep_filter":
  - do:
