diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java index 5c5a82b52f438..340e14653971b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java @@ -24,6 +24,8 @@ import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.ingest.WritePipelineResponse; import java.io.IOException; @@ -125,4 +127,37 @@ public void deletePipelineAsync(DeletePipelineRequest request, RequestOptions op restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, options, WritePipelineResponse::fromXContent, listener, emptySet()); } + + /** + * Simulate a pipeline on a set of documents provided in the request + *
<p>
+ * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html"> + * Simulate Pipeline API on elastic.co</a> + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public SimulatePipelineResponse simulatePipeline(SimulatePipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::simulatePipeline, options, + SimulatePipelineResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously simulate a pipeline on a set of documents provided in the request + *
<p>
+ * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html"> + * Simulate Pipeline API on elastic.co</a> + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void simulatePipelineAsync(SimulatePipelineRequest request, + RequestOptions options, + ActionListener<SimulatePipelineResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::simulatePipeline, options, + SimulatePipelineResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index c6c53501e0dd6..3b92d09b8ed56 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -72,6 +72,7 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -927,6 +928,20 @@ static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws I return request; } + static Request simulatePipeline(SimulatePipelineRequest simulatePipelineRequest) throws IOException { + EndpointBuilder builder = new EndpointBuilder().addPathPartAsIs("_ingest/pipeline"); + if (simulatePipelineRequest.getId() != null && !simulatePipelineRequest.getId().isEmpty()) { + builder.addPathPart(simulatePipelineRequest.getId()); + } + builder.addPathPartAsIs("_simulate"); + String endpoint = builder.build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request); + params.putParam("verbose", Boolean.toString(simulatePipelineRequest.isVerbose())); + request.setEntity(createEntity(simulatePipelineRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getAlias(GetAliasesRequest getAliasesRequest) { String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices(); String[] aliases = getAliasesRequest.aliases() == null ?
Strings.EMPTY_ARRAY : getAliasesRequest.aliases(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 14fe0e01d31f9..d9d57a49b4f8a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -123,9 +123,7 @@ private HighLevelClient(RestClient restClient) { } } - protected static XContentBuilder buildRandomXContentPipeline() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent()); + protected static XContentBuilder buildRandomXContentPipeline(XContentBuilder pipelineBuilder) throws IOException { pipelineBuilder.startObject(); { pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors"); @@ -152,6 +150,12 @@ protected static XContentBuilder buildRandomXContentPipeline() throws IOExceptio return pipelineBuilder; } + protected static XContentBuilder buildRandomXContentPipeline() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent()); + return buildRandomXContentPipeline(pipelineBuilder); + } + protected static void createPipeline(String pipelineId) throws IOException { XContentBuilder builder = buildRandomXContentPipeline(); createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java index ecc0d0052d415..6fd6f95059577 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java @@ -23,12 +23,22 @@ import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; +import org.elasticsearch.action.ingest.SimulateDocumentResult; +import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.ingest.PipelineConfiguration; import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.core.IsInstanceOf.instanceOf; public class IngestClientIT extends ESRestHighLevelClientTestCase { @@ -80,4 +90,93 @@ public void testDeletePipeline() throws IOException { execute(request, highLevelClient().ingest()::deletePipeline, highLevelClient().ingest()::deletePipelineAsync); assertTrue(response.isAcknowledged()); } + + public void testSimulatePipeline() throws IOException { + testSimulatePipeline(false, false); + } + + public void testSimulatePipelineWithFailure() throws 
IOException { + testSimulatePipeline(false, true); + } + + public void testSimulatePipelineVerbose() throws IOException { + testSimulatePipeline(true, false); + } + + public void testSimulatePipelineVerboseWithFailure() throws IOException { + testSimulatePipeline(true, true); + } + + private void testSimulatePipeline(boolean isVerbose, + boolean isFailure) throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); + String rankValue = isFailure ? "non-int" : Integer.toString(1234); + builder.startObject(); + { + builder.field("pipeline"); + buildRandomXContentPipeline(builder); + builder.startArray("docs"); + { + builder.startObject() + .field("_index", "index") + .field("_type", "doc") + .field("_id", "doc_" + 1) + .startObject("_source").field("foo", "rab_" + 1).field("rank", rankValue).endObject() + .endObject(); + } + builder.endArray(); + } + builder.endObject(); + + SimulatePipelineRequest request = new SimulatePipelineRequest( + BytesReference.bytes(builder), + builder.contentType() + ); + request.setVerbose(isVerbose); + SimulatePipelineResponse response = + execute(request, highLevelClient().ingest()::simulatePipeline, highLevelClient().ingest()::simulatePipelineAsync); + List<SimulateDocumentResult> results = response.getResults(); + assertEquals(1, results.size()); + if (isVerbose) { + assertThat(results.get(0), instanceOf(SimulateDocumentVerboseResult.class)); + SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult) results.get(0); + assertEquals(2, verboseResult.getProcessorResults().size()); + if (isFailure) { + assertNotNull(verboseResult.getProcessorResults().get(1).getFailure()); + assertThat(verboseResult.getProcessorResults().get(1).getFailure().getMessage(), + containsString("unable to convert [non-int] to integer")); + } else { + assertEquals( + verboseResult.getProcessorResults().get(0).getIngestDocument() + .getFieldValue("foo", String.class), + "bar" + ); + assertEquals( + Integer.valueOf(1234), + verboseResult.getProcessorResults().get(1).getIngestDocument() + .getFieldValue("rank", Integer.class) + ); + } + } else { + assertThat(results.get(0), instanceOf(SimulateDocumentBaseResult.class)); + SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)results.get(0); + if (isFailure) { + assertNotNull(baseResult.getFailure()); + assertThat(baseResult.getFailure().getMessage(), + containsString("unable to convert [non-int] to integer")); + } else { + assertNotNull(baseResult.getIngestDocument()); + assertEquals( + baseResult.getIngestDocument().getFieldValue("foo", String.class), + "bar" + ); + assertEquals( + Integer.valueOf(1234), + baseResult.getIngestDocument() + .getFieldValue("rank", Integer.class) + ); + } + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index e416b3bd29fe8..8035e1582c2dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -75,6 +75,7 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; import
org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -1622,6 +1623,34 @@ public void testDeletePipeline() { assertEquals(expectedParams, expectedRequest.getParameters()); } + public void testSimulatePipeline() throws IOException { + String pipelineId = randomBoolean() ? "some_pipeline_id" : null; + boolean verbose = randomBoolean(); + String json = "{\"pipeline\":{" + + "\"description\":\"_description\"," + + "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," + + "\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}]}"; + SimulatePipelineRequest request = new SimulatePipelineRequest( + new BytesArray(json.getBytes(StandardCharsets.UTF_8)), + XContentType.JSON + ); + request.setId(pipelineId); + request.setVerbose(verbose); + Map<String, String> expectedParams = new HashMap<>(); + expectedParams.put("verbose", Boolean.toString(verbose)); + + Request expectedRequest = RequestConverters.simulatePipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + if (pipelineId != null && !pipelineId.isEmpty()) + endpoint.add(pipelineId); + endpoint.add("_simulate"); + assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, expectedRequest.getMethod()); + assertEquals(expectedParams, expectedRequest.getParameters()); + assertToXContentBody(request, expectedRequest.getEntity()); + } + public void testClusterHealth() { ClusterHealthRequest healthRequest = new ClusterHealthRequest(); Map<String, String> expectedParams = new HashMap<>(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 8b8998baff581..c706a3a03f203 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -114,7 +114,7 @@ public void testIndex() throws Exception { .source(jsonMap); // <1> //end::index-request-map IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } { //tag::index-request-xcontent @@ -130,7 +130,7 @@ public void testIndex() throws Exception { .source(builder); // <1> //end::index-request-xcontent IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult()); } { //tag::index-request-shortcut @@ -140,7 +140,7 @@ public void testIndex() throws Exception { "message", "trying out Elasticsearch"); // <1> //end::index-request-shortcut IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult()); } { //tag::index-request-string @@ -159,7 +159,7 @@ public void testIndex() throws Exception { // tag::index-execute IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); // end::index-execute -
assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult()); // tag::index-response String index = indexResponse.getIndex(); @@ -273,7 +273,7 @@ public void testUpdate() throws Exception { { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", 0); IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); - assertSame(indexResponse.status(), RestStatus.CREATED); + assertSame(RestStatus.CREATED, indexResponse.status()); Request request = new Request("POST", "/_scripts/increment-field"); request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder() @@ -284,7 +284,7 @@ public void testUpdate() throws Exception { .endObject() .endObject())); Response response = client().performRequest(request); - assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); + assertEquals(RestStatus.OK.getStatus(), response.getStatusLine().getStatusCode()); } { //tag::update-request @@ -302,7 +302,7 @@ public void testUpdate() throws Exception { request.script(inline); // <3> //end::update-request-with-inline-script UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertEquals(4, updateResponse.getGetResult().getSource().get("field")); request = new UpdateRequest("posts", "doc", "1").fetchSource(true); @@ -312,7 +312,7 @@ public void testUpdate() throws Exception { request.script(stored); // <2> //end::update-request-with-stored-script updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertEquals(8, updateResponse.getGetResult().getSource().get("field")); } { @@ -324,7 +324,7 @@ public void testUpdate() throws Exception { .doc(jsonMap); // <1> //end::update-request-with-doc-as-map UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); } { //tag::update-request-with-doc-as-xcontent @@ -339,7 +339,7 @@ public void testUpdate() throws Exception { .doc(builder); // <1> //end::update-request-with-doc-as-xcontent UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); } { //tag::update-request-shortcut @@ -348,7 +348,7 @@ public void testUpdate() throws Exception { "reason", "daily update"); // <1> //end::update-request-shortcut UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); } { //tag::update-request-with-doc-as-string @@ -363,7 +363,7 @@ public void testUpdate() throws Exception { // tag::update-execute UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); // end::update-execute - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); // 
tag::update-response String index = updateResponse.getIndex(); @@ -438,7 +438,7 @@ public void testUpdate() throws Exception { request.fetchSource(true); // <1> //end::update-request-no-source UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertNotNull(updateResponse.getGetResult()); assertEquals(3, updateResponse.getGetResult().sourceAsMap().size()); } @@ -450,7 +450,7 @@ public void testUpdate() throws Exception { request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1> //end::update-request-source-include UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); Map<String, Object> sourceAsMap = updateResponse.getGetResult().sourceAsMap(); assertEquals(2, sourceAsMap.size()); assertEquals("source includes", sourceAsMap.get("reason")); @@ -464,7 +464,7 @@ public void testUpdate() throws Exception { request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1> //end::update-request-source-exclude UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); - assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); Map<String, Object> sourceAsMap = updateResponse.getGetResult().sourceAsMap(); assertEquals(2, sourceAsMap.size()); assertEquals("source excludes", sourceAsMap.get("reason")); @@ -545,7 +545,7 @@ public void testDelete() throws Exception { { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", "value"); IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); - assertSame(indexResponse.status(), RestStatus.CREATED); + assertSame(RestStatus.CREATED, indexResponse.status()); } { @@ -559,7 +559,7 @@ public void testDelete() throws Exception { // tag::delete-execute DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); // end::delete-execute - assertSame(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + assertSame(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); // tag::delete-response String index = deleteResponse.getIndex(); @@ -615,7 +615,7 @@ public void testDelete() throws Exception { { IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value") , RequestOptions.DEFAULT); - assertSame(indexResponse.status(), RestStatus.CREATED); + assertSame(RestStatus.CREATED, indexResponse.status()); // tag::delete-conflict try { @@ -631,7 +631,7 @@ public void testDelete() throws Exception { { IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value"), RequestOptions.DEFAULT); - assertSame(indexResponse.status(), RestStatus.CREATED); + assertSame(RestStatus.CREATED, indexResponse.status()); DeleteRequest request = new DeleteRequest("posts", "doc", "async"); @@ -676,7 +676,7 @@ public void testBulk() throws Exception { // tag::bulk-execute BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); // end::bulk-execute - assertSame(bulkResponse.status(), RestStatus.OK); + assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } { @@ -689,7 +689,7 @@ public void
testBulk() throws Exception { .source(XContentType.JSON,"field", "baz")); // end::bulk-request-with-mixed-operations BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); - assertSame(bulkResponse.status(), RestStatus.OK); + assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); // tag::bulk-response @@ -788,7 +788,7 @@ public void testGet() throws Exception { "postDate", new Date(), "message", "trying out Elasticsearch"); IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } { //tag::get-request diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java index f5bdc9f2f3ee5..c53ec2b5d7cc7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java @@ -25,6 +25,12 @@ import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; +import org.elasticsearch.action.ingest.SimulateDocumentResult; +import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineResponse; +import org.elasticsearch.action.ingest.SimulateProcessorResult; import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; @@ -277,4 +283,109 @@ public void onFailure(Exception e) { } } + public void testSimulatePipeline() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + // tag::simulate-pipeline-request + String source = + "{\"" + + "pipeline\":{" + + "\"description\":\"_description\"," + + "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]" + + "}," + + "\"docs\":[" + + "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}," + + "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}" + + "]" + + "}"; + SimulatePipelineRequest request = new SimulatePipelineRequest( + new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <1> + XContentType.JSON // <2> + ); + // end::simulate-pipeline-request + + // tag::simulate-pipeline-request-pipeline-id + request.setId("my-pipeline-id"); // <1> + // end::simulate-pipeline-request-pipeline-id + + // For testing we set this back to null + request.setId(null); + + // tag::simulate-pipeline-request-verbose + request.setVerbose(true); // <1> + // end::simulate-pipeline-request-verbose + + // tag::simulate-pipeline-execute + SimulatePipelineResponse response = client.ingest().simulatePipeline(request, RequestOptions.DEFAULT); // <1> + // end::simulate-pipeline-execute + + // tag::simulate-pipeline-response + for (SimulateDocumentResult result: response.getResults()) { // <1> + if (request.isVerbose()) { + assert result instanceof SimulateDocumentVerboseResult; + 
SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult)result; // <2> + for (SimulateProcessorResult processorResult: verboseResult.getProcessorResults()) { // <3> + processorResult.getIngestDocument(); // <4> + processorResult.getFailure(); // <5> + } + } else { + assert result instanceof SimulateDocumentBaseResult; + SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)result; // <6> + baseResult.getIngestDocument(); // <7> + baseResult.getFailure(); // <8> + } + } + // end::simulate-pipeline-response + assert(response.getResults().size() > 0); + } + } + + public void testSimulatePipelineAsync() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + String source = + "{\"" + + "pipeline\":{" + + "\"description\":\"_description\"," + + "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]" + + "}," + + "\"docs\":[" + + "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}," + + "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}" + + "]" + + "}"; + SimulatePipelineRequest request = new SimulatePipelineRequest( + new BytesArray(source.getBytes(StandardCharsets.UTF_8)), + XContentType.JSON + ); + + // tag::simulate-pipeline-execute-listener + ActionListener<SimulatePipelineResponse> listener = + new ActionListener<SimulatePipelineResponse>() { + @Override + public void onResponse(SimulatePipelineResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::simulate-pipeline-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::simulate-pipeline-execute-async + client.ingest().simulatePipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::simulate-pipeline-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + } diff --git a/distribution/packages/src/common/scripts/preinst b/distribution/packages/src/common/scripts/preinst index 2aec2172ad856..22f2405af3c2b 100644 --- a/distribution/packages/src/common/scripts/preinst +++ b/distribution/packages/src/common/scripts/preinst @@ -9,6 +9,18 @@ # $1=1 : indicates an new install # $1=2 : indicates an upgrade +# Check for these at preinst time due to failures in postinst if they do not exist +if [ -x "$JAVA_HOME/bin/java" ]; then + JAVA="$JAVA_HOME/bin/java" +else + JAVA=`which java` +fi + +if [ -z "$JAVA" ]; then + echo "could not find java; set JAVA_HOME or ensure java is in PATH" + exit 1 +fi + case "$1" in # Debian #################################################### diff --git a/docs/java-rest/high-level/ingest/simulate_pipeline.asciidoc b/docs/java-rest/high-level/ingest/simulate_pipeline.asciidoc new file mode 100644 index 0000000000000..9d1bbd06ceb26 --- /dev/null +++ b/docs/java-rest/high-level/ingest/simulate_pipeline.asciidoc @@ -0,0 +1,90 @@ +[[java-rest-high-ingest-simulate-pipeline]] +=== Simulate Pipeline API + +[[java-rest-high-ingest-simulate-pipeline-request]] +==== Simulate Pipeline Request + +A `SimulatePipelineRequest` requires a source and an `XContentType`. The source consists +of the request body. See the https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html[docs] +for more details on the request body.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request] +-------------------------------------------------- +<1> The request body as a `BytesArray`. +<2> The XContentType for the request body supplied above. + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request-pipeline-id] +-------------------------------------------------- +<1> You can either specify an existing pipeline to execute against the provided documents, or supply a +pipeline definition in the body of the request. This option sets the id for an existing pipeline. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request-verbose] +-------------------------------------------------- +<1> To see the intermediate results of each processor in the simulate request, you can add the verbose parameter +to the request. + +[[java-rest-high-ingest-simulate-pipeline-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute] +-------------------------------------------------- +<1> Execute the request and get back the response in a `SimulatePipelineResponse` object. + +[[java-rest-high-ingest-simulate-pipeline-async]] +==== Asynchronous Execution + +The asynchronous execution of a simulate pipeline request requires both the `SimulatePipelineRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute-async] +-------------------------------------------------- +<1> The `SimulatePipelineRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `SimulatePipelineResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure.
The raised exception is provided as an argument + +[[java-rest-high-ingest-simulate-pipeline-response]] +==== Simulate Pipeline Response + +The returned `SimulatePipelineResponse` allows you to retrieve information about the executed +operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-response] +-------------------------------------------------- +<1> Get results for each of the documents provided, as a `List<SimulateDocumentResult>`. +<2> If the request was in verbose mode, cast the response to `SimulateDocumentVerboseResult`. +<3> Check the result after each processor is applied. +<4> Get the ingest document for the result obtained in 3. +<5> Or get the failure for the result obtained in 3. +<6> Get the result as `SimulateDocumentBaseResult` if the result was not verbose. +<7> Get the ingest document for the result obtained in 6. +<8> Or get the failure for the result obtained in 6. diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 3caab5100ca0f..9ed54db817551 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -125,10 +125,12 @@ The Java High Level REST Client supports the following Ingest APIs: * <<java-rest-high-ingest-put-pipeline>> * <<java-rest-high-ingest-get-pipeline>> * <<java-rest-high-ingest-delete-pipeline>> +* <<java-rest-high-ingest-simulate-pipeline>> include::ingest/put_pipeline.asciidoc[] include::ingest/get_pipeline.asciidoc[] include::ingest/delete_pipeline.asciidoc[] +include::ingest/simulate_pipeline.asciidoc[] == Snapshot APIs diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index a7aaf0177f5b9..8876cf21b02c1 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1075,9 +1075,10 @@ then it aborts the execution and leaves the array unmodified. .Foreach Options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The array field -| `processor` | yes | - | The processor to execute against each field +| Name | Required | Default | Description +| `field` | yes | - | The array field +| `processor` | yes | - | The processor to execute against each field +| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document |====== Assume the following document: diff --git a/docs/reference/sql/appendix/index.asciidoc b/docs/reference/sql/appendix/index.asciidoc new file mode 100644 index 0000000000000..b00176a8a3f67 --- /dev/null +++ b/docs/reference/sql/appendix/index.asciidoc @@ -0,0 +1 @@ +include::syntax-reserved.asciidoc[] \ No newline at end of file diff --git a/docs/reference/sql/language/reserved.asciidoc b/docs/reference/sql/appendix/syntax-reserved.asciidoc similarity index 98% rename from docs/reference/sql/language/reserved.asciidoc rename to docs/reference/sql/appendix/syntax-reserved.asciidoc index 1ae551cc43c08..7a502d6eea939 100644 --- a/docs/reference/sql/language/reserved.asciidoc +++ b/docs/reference/sql/appendix/syntax-reserved.asciidoc @@ -1,7 +1,8 @@ [role="xpack"] [testenv="basic"] -[[sql-spec-reserved]] -=== Reserved Keywords +[appendix] +[[sql-syntax-reserved]] += Reserved Keywords Table with reserved keywords that need to be quoted. Also provide an example to make it more obvious.
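Taken together, the request, execution, and response pieces above combine as follows. This is a minimal, hypothetical usage sketch rather than part of the diff, assuming a `RestHighLevelClient` named `client` and the same classes imported by `IngestClientDocumentationIT`:

["source","java"]
--------------------------------------------------
// Pipeline definition plus one test document, sent as a JSON body.
String source =
    "{\"pipeline\":{\"description\":\"test\"," +
        "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," +
    "\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\"," +
        "\"_source\":{\"foo\":\"bar\"}}]}";
// No pipeline id is set, so the definition embedded in the body is simulated.
SimulatePipelineRequest request = new SimulatePipelineRequest(
    new BytesArray(source.getBytes(StandardCharsets.UTF_8)), XContentType.JSON);
SimulatePipelineResponse response =
    client.ingest().simulatePipeline(request, RequestOptions.DEFAULT);
for (SimulateDocumentResult result : response.getResults()) {
    // verbose was not requested, so every result is a base result
    SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult) result;
    if (baseResult.getFailure() == null) {
        // "field2" was added by the set processor defined above
        String value = baseResult.getIngestDocument()
            .getFieldValue("field2", String.class);
    }
}
--------------------------------------------------

Setting `request.setVerbose(true)` instead would make each result a `SimulateDocumentVerboseResult` carrying one `SimulateProcessorResult` per processor, as the response section above describes.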
diff --git a/docs/reference/sql/concepts.asciidoc b/docs/reference/sql/concepts.asciidoc new file mode 100644 index 0000000000000..1dc23e391fab1 --- /dev/null +++ b/docs/reference/sql/concepts.asciidoc @@ -0,0 +1,65 @@ +[role="xpack"] +[testenv="basic"] +[[sql-concepts]] +== Conventions and Terminology + +For clarity, it is important to establish the meaning behind certain words, as the same wording might convey different meanings to different readers depending on one's familiarity with SQL versus {es}. + +NOTE: This documentation, while trying to be complete, does assume the reader has a _basic_ understanding of {es} and/or SQL. If that is not the case, please continue reading the documentation, but take notes and pursue the topics that are unclear either through the main {es} documentation or through the plethora of SQL material available in the open (there are simply too many excellent resources here to enumerate). + +As a general rule, {es-sql}, as the name indicates, provides a SQL interface to {es}. As such, it follows the SQL terminology and conventions first, whenever possible. However, the backing engine itself is {es}, for which {es-sql} was purposely created, hence why features or concepts that are not available, or cannot be mapped correctly, in SQL appear +in {es-sql}. +Last but not least, {es-sql} tries to obey the https://en.wikipedia.org/wiki/Principle_of_least_astonishment[principle of least surprise], though as all things in the world, everything is relative. + +=== Mapping concepts across SQL and {es} + +While SQL and {es} have different terms for the way the data is organized (and different semantics), essentially their purpose is the same. + +So let's start from the bottom; these roughly are: + +[cols="1,1,5", options="header"] +|=== +|SQL +|{es} +|Description + +|`column` +|`field` +|In both cases, at the lowest level, data is stored in _named_ entries, of a variety of <<mapping-types,data types>>, containing _one_ value. SQL calls such an entry a _column_ while {es} a _field_. +Notice that in {es} a field can contain _multiple_ values of the same type (essentially a list) while in SQL, a _column_ can contain _exactly_ one value of said type. +{es-sql} will do its best to preserve the SQL semantic and, depending on the query, reject those that return fields with more than one value. + +|`row` +|`document` +|++Column++s and ++field++s do _not_ exist by themselves; they are part of a `row` or a `document`. The two have slightly different semantics: a `row` tends to be _strict_ (and have more enforcements) while a `document` tends to be a bit more flexible or loose (while still having a structure). + +|`table` +|`index` +|The target against which queries are executed, whether in SQL or {es}. + +|`schema` +|_implicit_ +|In RDBMS, `schema` is mainly a namespace of tables and typically used as a security boundary. {es} does not provide an equivalent concept for it. However, when security is enabled, {es} automatically applies the security enforcement so that a role sees only the data it is allowed to (in SQL jargon, its _schema_). + +|`catalog` or `database` +|`cluster` instance +|In SQL, `catalog` or `database` are used interchangeably and represent a set of schemas, that is, a number of tables. +In {es} the set of indices available are grouped in a `cluster`.
The semantics also differ a bit; a `database` is essentially yet another namespace (which can have some implications on the way data is stored) while an {es} `cluster` is a runtime instance, or rather a set of at least one {es} instance (typically running distributed). +In practice this means that while in SQL one can potentially have multiple catalogs inside an instance, in {es} one is restricted to only _one_. + +|`cluster` +|`cluster` (federated) +|Traditionally in SQL, _cluster_ refers to a single RDBMS instance which contains a number of ++catalog++s or ++database++s (see above). The same word can be reused inside {es} as well, however its semantics need to be clarified a bit. + +While RDBMS tend to have only one running instance, on a single machine (_not_ distributed), {es} goes the opposite way and, by default, is distributed and multi-instance. + +Furthermore, an {es} `cluster` can be connected to other ++cluster++s in a _federated_ fashion, thus `cluster` means: + +single cluster:: +Multiple {es} instances typically distributed across machines, running within the same namespace. +multiple clusters:: +Multiple clusters, each with its own namespace, connected to each other in a federated setup (see <<modules-cross-cluster-search,Cross cluster search>>). + +|=== + +As one can see, while the mapping between the concepts is not exactly one to one and the semantics are somewhat different, there are more things in common than differences. In fact, thanks to SQL's declarative nature, many concepts can move across {es} transparently and the terminology of the two is likely to be used interchangeably throughout the rest of the material. \ No newline at end of file diff --git a/docs/reference/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc index 206d687d97a5f..0908c2344bb15 100644 --- a/docs/reference/sql/endpoints/cli.asciidoc +++ b/docs/reference/sql/endpoints/cli.asciidoc @@ -38,18 +38,3 @@ James S.A. Corey |Leviathan Wakes |561 |1306972800000 -------------------------------------------------- // TODO it'd be lovely to be able to assert that this is correct but // that is probably more work then it is worth right now. - -[[sql-cli-permissions]] -[NOTE] -=============================== -If you are using Security you need to add a few permissions to -users so they can run SQL. To run SQL using the CLI a user needs -`read`, `indices:admin/get`, and `cluster:monitor/main`. The -following example configures a role that can run SQL in the CLI -for the `test` and `bort` indices: - -["source","yaml",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-tests}/security/roles.yml[cli_jdbc] --------------------------------------------------- -=============================== diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 2125cc2ee839c..6a8793f7e24e2 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -37,11 +37,11 @@ from `artifacts.elastic.co/maven` by adding it to the repositories list: [float] === Setup -The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`. Note the driver -also implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registerd automatically +The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`. +Note the driver implements the JDBC 4.0 +Service Provider+ mechanism, meaning it is registered automatically as long as it's available in the classpath.
-Once registered, the driver expects the following syntax as an URL: +Once registered, the driver understands the following syntax as a URL: ["source","text",subs="attributes"] ---- @@ -121,12 +121,12 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a query to return. To put all of it together, the following URL: -["source","text",subs="attributes"] +["source","text"] ---- jdbc:es://http://server:3456/timezone=UTC&page.size=250 ---- -Opens up a {es-jdbc} connection to `server` on port `3456`, setting the JDBC timezone to `UTC` and its pagesize to `250` entries. +Opens up a {es-sql} connection to `server` on port `3456`, setting the JDBC connection timezone to `UTC` and its page size to `250` entries. === API usage @@ -176,20 +176,4 @@ connection. For example: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example] --------------------------------------------------- - -[[sql-jdbc-permissions]] -[NOTE] -=============================== -If you are using Security you need to add a few permissions to -users so they can run SQL. To run SQL a user needs `read` and -`indices:admin/get`. Some parts of the API require -`cluster:monitor/main`. The following example configures a -role that can run SQL in JDBC querying the `test` and `bort` -indices: - -["source","yaml",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-tests}/security/roles.yml[cli_jdbc] --------------------------------------------------- -=============================== +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index ef4c08ba483bc..f33189303e682 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -188,17 +188,3 @@ or fewer results though. `time_zone` is the time zone to use for date functions and date parsing. `time_zone` defaults to `utc` and can take any values documented http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here]. - -[[sql-rest-permissions]] -[NOTE] -=============================== -If you are using Security you need to add a few permissions to -users so they can run SQL. To run SQL a user needs `read` and -`indices:admin/get`. The following example configures a role -that can run SQL against the `test` and `bort` indices: - -["source","yaml",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-tests}/security/roles.yml[rest] --------------------------------------------------- -=============================== diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc index 3f2f87ab2e2f5..db450b5f914c8 100644 --- a/docs/reference/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -58,18 +58,3 @@ the normal <> API. The request body accepts all of the <> that the <> accepts except `cursor`. - -[[sql-translate-permissions]] -[NOTE] -=============================== -If you are using Security you need to add a few permissions to -users so they can run translate SQL. To translate SQL a user -needs `read` and `indices:admin/get`.
The following example -configures a role that can run SQL against the `test` and -`bort` indices: - -["source","yaml",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-tests}/security/roles.yml[rest] --------------------------------------------------- -=============================== diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 653b5a92fec52..93d201a182828 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -350,6 +350,25 @@ include-tagged::{sql-specs}/datetime.csv-spec[minuteOfHour] include-tagged::{sql-specs}/datetime.csv-spec[secondOfMinute] -------------------------------------------------- +* Extract + +As an alternative, one can use `EXTRACT` to extract fields from datetimes. +You can run any <<sql-functions-datetime,datetime function>> +with `EXTRACT(<datetime_function> FROM <expression>)`. So + +["source","sql",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear] +-------------------------------------------------- + +is the equivalent to + +["source","sql",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear] +-------------------------------------------------- + + [[sql-functions-aggregate]] === Aggregate Functions diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index 3d69a240a9a6f..33b9da9fab93d 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -20,7 +20,11 @@ indices and return results in tabular format. <<sql-overview>>:: Overview of {es-sql} and its features. <<sql-getting-started>>:: - Start using SQL right away in {es} + Start using SQL right away in {es}. +<<sql-concepts>>:: + Language conventions across SQL and {es}. +<<sql-security>>:: + Securing {es-sql} and {es}. <<sql-rest>>:: Accepts SQL in a JSON document, executes it, and returns the results. @@ -32,18 +36,20 @@ indices and return results in tabular format. SQL and print tabular results. <<sql-jdbc>>:: A JDBC driver for {es}. +<<sql-spec>>:: + Overview of the {es-sql} language, such as supported data types, commands and + syntax. <<sql-functions>>:: List of functions and operators supported. -<<sql-spec>>:: - Overview of the {es-sql} language, such as data types, syntax and - reserved keywords. -- include::overview.asciidoc[] include::getting-started.asciidoc[] +include::concepts.asciidoc[] +include::security.asciidoc[] include::endpoints/index.asciidoc[] -include::functions/index.asciidoc[] include::language/index.asciidoc[] +include::functions/index.asciidoc[] +include::appendix/index.asciidoc[] :jdbc-tests!: diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index 0ea152f639d61..7f98add97248b 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[sql-data-types]] -=== Data Types +== Data Types Most of {es} <<mapping-types,data types>> are available in {es-sql}, as indicated below: @@ -44,7 +44,7 @@ uses the data type _particularities_ of the former over the latter as ultimately [[sql-multi-field]] [float] -==== SQL and multi-fields +=== SQL and multi-fields A core concept in {es} is that of an `analyzed` field, that is a full-text value that is interpreted in order to be effectively indexed.
These fields are of type <<text,text>> and are not used for sorting or aggregations as their actual value depends on the <<analyzer,analyzer>> used, hence why {es} also offers the <<keyword,keyword>> type for storing the _exact_ value. diff --git a/docs/reference/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc index de8528242b07a..6558e9ad92bf8 100644 --- a/docs/reference/sql/language/index.asciidoc +++ b/docs/reference/sql/language/index.asciidoc @@ -3,9 +3,10 @@ [[sql-spec]] == SQL Language -This chapter describes the SQL syntax and data types supported in X-Pack. -As a general rule, the syntax tries to adhere as much as possible to ANSI SQL to make the transition seamless. +This chapter describes the SQL semantics supported in X-Pack, namely: + +<<sql-data-types>>:: Data types +<<sql-commands>>:: Commands include::data-types.asciidoc[] -include::syntax.asciidoc[] -include::reserved.asciidoc[] +include::syntax/index.asciidoc[] diff --git a/docs/reference/sql/language/syntax.asciidoc b/docs/reference/sql/language/syntax.asciidoc deleted file mode 100644 index 2565c54166095..0000000000000 --- a/docs/reference/sql/language/syntax.asciidoc +++ /dev/null @@ -1,125 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[sql-spec-syntax]] -=== SQL Statement Syntax - -// Big list of the entire syntax in SQL - -// Each entry might get its own file and code snippet - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/select.sql-spec[wildcardWithOrder] --------------------------------------------------- - - -[[sql-spec-syntax-order-by]] -==== `ORDER BY` - -Elasticsearch supports `ORDER BY` for consistent ordering. You add -any field in the index that has <<doc-values>> or -`SCORE()` to sort by `_score`. By default SQL sorts on what it -considers to be the most efficient way to get the results. - -So sorting by a field looks like: - -[source,js] --------------------------------------------------- -POST /_xpack/sql?format=txt -{ - "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5" -} --------------------------------------------------- -// CONSOLE -// TEST[setup:library] - -which results in something like: - -[source,text] --------------------------------------------------- - author | name | page_count | release_date ------------------+--------------------+---------------+------------------------ -Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00.000Z -Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00.000Z -Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z -Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z -James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z --------------------------------------------------- -// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] -// TESTRESPONSE[_cat] - -[[sql-spec-syntax-order-by-score]] -For sorting by score to be meaningful you need to include a full -text query in the `WHERE` clause. If you include multiple full -text queries in the `WHERE` clause then their scores will be -combined using the same rules as Elasticsearch's -<<query-dsl-bool-query,bool query>>.
Here is a simple example: - -[source,js] --------------------------------------------------- -POST /_xpack/sql?format=txt -{ - "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC" -} --------------------------------------------------- -// CONSOLE -// TEST[setup:library] - -Which results in something like: - -[source,text] --------------------------------------------------- - SCORE() | author | name | page_count | release_date ----------------+---------------+-------------------+---------------+------------------------ -2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z -1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z -1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z -1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z --------------------------------------------------- -// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/] -// TESTRESPONSE[_cat] - -Note that you can return `SCORE()` by adding it to the where clause. This -is possible even if you are not sorting by `SCORE()`: - -[source,js] --------------------------------------------------- -POST /_xpack/sql?format=txt -{ - "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC" -} --------------------------------------------------- -// CONSOLE -// TEST[setup:library] - -[source,text] --------------------------------------------------- - SCORE() | author | name | page_count | release_date ----------------+---------------+-------------------+---------------+------------------------ -2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z -1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z -1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z -1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z --------------------------------------------------- -// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/] -// TESTRESPONSE[_cat] - - -[[sql-spec-syntax-extract]] -==== `EXTRACT` - -Elasticsearch supports `EXTRACT` to extract fields from datetimes. -You can run any <<sql-functions-datetime,datetime function>> -with `EXTRACT(<datetime_function> FROM <expression>)`. So - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear] --------------------------------------------------- - -is the equivalent to - -["source","sql",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear] --------------------------------------------------- diff --git a/docs/reference/sql/language/syntax/describe-table.asciidoc b/docs/reference/sql/language/syntax/describe-table.asciidoc new file mode 100644 index 0000000000000..dd2d27a5781d2 --- /dev/null +++ b/docs/reference/sql/language/syntax/describe-table.asciidoc @@ -0,0 +1,22 @@ +[role="xpack"] +[testenv="basic"] +[[sql-syntax-describe-table]] +=== DESCRIBE TABLE + +.Synopsis +[source, sql] +---- +DESCRIBE table +---- + +or + +[source, sql] +---- +DESC table +---- + + +.Description + +`DESC` and `DESCRIBE` are aliases to <<sql-syntax-show-columns>>.
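Since `DESC` and `DESCRIBE` are just aliases, they can be exercised against the SQL REST endpoint shown in the surrounding examples. The following is a hypothetical sketch using the low-level REST client (`Request`/`Response` from `org.elasticsearch.client`, `EntityUtils` from Apache HttpCore), assuming a `RestClient` named `lowLevelClient` and the `library` test index used throughout these docs:

["source","java"]
--------------------------------------------------
// DESCRIBE is an alias for SHOW COLUMNS, so both return the same table.
Request request = new Request("POST", "/_xpack/sql");
request.addParameter("format", "txt"); // plain-text tabular output
request.setJsonEntity("{\"query\":\"DESCRIBE library\"}");
Response response = lowLevelClient.performRequest(request);
String columns = EntityUtils.toString(response.getEntity());
--------------------------------------------------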
diff --git a/docs/reference/sql/language/syntax/index.asciidoc b/docs/reference/sql/language/syntax/index.asciidoc new file mode 100644 index 0000000000000..4af8f19d7034b --- /dev/null +++ b/docs/reference/sql/language/syntax/index.asciidoc @@ -0,0 +1,18 @@ +[role="xpack"] +[testenv="basic"] +[[sql-commands]] +== SQL Commands + +This section contains the list of SQL commands supported by {es-sql} along with their syntax: + +<<sql-syntax-describe-table>>:: Describe a table. +<<sql-syntax-select>>:: Retrieve rows from zero or more tables. +<<sql-syntax-show-columns>>:: List columns in table. +<<sql-syntax-show-functions>>:: List supported functions. +<<sql-syntax-show-tables>>:: List tables available. + +include::describe-table.asciidoc[] +include::select.asciidoc[] +include::show-columns.asciidoc[] +include::show-functions.asciidoc[] +include::show-tables.asciidoc[] diff --git a/docs/reference/sql/language/syntax/select.asciidoc b/docs/reference/sql/language/syntax/select.asciidoc new file mode 100644 index 0000000000000..4a7c0534b68a3 --- /dev/null +++ b/docs/reference/sql/language/syntax/select.asciidoc @@ -0,0 +1,286 @@ +[role="xpack"] +[testenv="basic"] +[[sql-syntax-select]] +=== SELECT + +.Synopsis +[source, sql] +---- +SELECT select_expr [, ...] +[ FROM table_name ] +[ WHERE condition ] +[ GROUP BY grouping_element [, ...] ] +[ HAVING condition ] +[ ORDER BY expression [ ASC | DESC ] [, ...] ] +[ LIMIT [ count ] ] +---- + +.Description + +Retrieves rows from zero or more tables. + +The general execution of `SELECT` is as follows: + +. All elements in the `FROM` list are computed (each element can be base or alias table). Currently `FROM` supports exactly one table. Do note however that the table name can be a pattern (see <<sql-syntax-from>> below). +. If the `WHERE` clause is specified, all rows that do not satisfy the condition are eliminated from the output. (See <<sql-syntax-where>> below.) +. If the `GROUP BY` clause is specified, or if there are aggregate function calls, the output is combined into groups of rows that match on one or more values, and the results of aggregate functions are computed. If the `HAVING` clause is present, it eliminates groups that do not satisfy the given condition. (See <<sql-syntax-group-by>> and <<sql-syntax-having>> below.) +. The actual output rows are computed using the `SELECT` output expressions for each selected row or row group. +. If the `ORDER BY` clause is specified, the returned rows are sorted in the specified order. If `ORDER BY` is not given, the rows are returned in whatever order the system finds fastest to produce. (See <<sql-syntax-order-by>> below.) +. If the `LIMIT` is specified, the `SELECT` statement only returns a subset of the result rows. (See <<sql-syntax-limit>> below.) + + +[[sql-syntax-select-list]] +==== `SELECT` List + +The `SELECT` list, namely the expressions between `SELECT` and `FROM`, represents the output rows of the `SELECT` statement. + +As with a table, every output column of a `SELECT` has a name which can be either specified per column through the `AS` keyword: + +[source,sql] +---- +SELECT column AS c +---- + +assigned by {es-sql} if no name is given: + +[source,sql] +---- +SELECT 1 + 1 +---- + +or, if it's a simple column reference, its name is used as the column name: + +[source,sql] +---- +SELECT col FROM table +---- + +[[sql-syntax-select-wildcard]] +==== Wildcard + +To select all the columns in the source, one can use `*`: + +["source","sql",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/select.sql-spec[wildcardWithOrder] +-------------------------------------------------- + +which essentially returns all columns found.
+ +[[sql-syntax-from]] +[float] +==== FROM Clause + +The `FROM` clause specifies one table for the `SELECT` and has the following syntax: + +[source, sql] +---- +FROM table_name [ [ AS ] alias ] +---- + +where: + +`table_name`:: + +Represents the name (optionally qualified) of an existing table, either a concrete one (an actual index) or an alias. +If the table name contains special SQL characters (such as `.`, `-`, etc.) use double quotes to escape them: +[source, sql] +---- +SELECT ... FROM "some-table" +---- + +The name can be an <<multi-index,index pattern>> pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that *all* resolved concrete tables have the **exact same mapping**. + +`alias`:: +A substitute name for the `FROM` item containing the alias. An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place. + +[[sql-syntax-where]] +[float] +==== WHERE Clause + +The optional `WHERE` clause is used to filter rows from the query and has the following syntax: + +[source, sql] +---- +WHERE condition +---- + +where: + +`condition`:: + +Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned. + +[[sql-syntax-group-by]] +[float] +==== GROUP BY + +The `GROUP BY` clause is used to divide the results into groups of rows on matching values from the designated columns. It has the following syntax: + +[source, sql] +---- +GROUP BY grouping_element [, ...] +---- + +where: + +`grouping_element`:: + +Represents an expression on which rows are grouped. It can be a column name, the name or ordinal number of a column, or an arbitrary expression of column values. + +When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be either aggregate functions or expressions used for grouping (or derivatives thereof); otherwise there would be more than one possible value to return for each ungrouped column. + +[[sql-syntax-having]] +[float] +==== HAVING + +The `HAVING` clause can be used _only_ along with aggregate functions (and thus `GROUP BY`) to filter which groups are kept, and has the following syntax: + +[source, sql] +---- +HAVING condition +---- + +where: + +`condition`:: + +Represents an expression that evaluates to a `boolean`. Only groups that match the condition (to `true`) are returned. + +Both `WHERE` and `HAVING` are used for filtering; however, there are several differences between them: + +. `WHERE` works on individual *rows*, `HAVING` works on the *groups* created by ``GROUP BY`` +. `WHERE` is evaluated *before* grouping, `HAVING` is evaluated *after* grouping + +Note that it is possible to have a `HAVING` clause without a ``GROUP BY``. In this case, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. +As such, the query emits only a single row (as there is only a single group) and the `HAVING` condition returns either that one row (the group) or zero rows if the condition fails.
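To tie the filtering clauses together, here is a minimal sketch (again assuming the `library` index with its `author` and `page_count` columns from the examples in this guide): `WHERE` filters individual books before grouping, while `HAVING` filters the per-author groups after aggregation:

[source, sql]
----
SELECT author, COUNT(*) AS books
FROM library
WHERE page_count > 100
GROUP BY author
HAVING COUNT(*) > 1
----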
[[sql-syntax-order-by]] +[float] +==== ORDER BY + +The `ORDER BY` clause is used to sort the results of `SELECT` by one or more expressions: + +[source, sql] +---- +ORDER BY expression [ ASC | DESC ] [, ...] +---- + +where: + +`expression`:: + +Represents an input column, an output column or an ordinal number of the position (starting from one) of an output column. +Additionally, ordering can be done based on the results _score_. +The direction, if not specified, is by default `ASC` (ascending). +Regardless of the ordering specified, null values are ordered last (at the end). + +IMPORTANT: When used alongside `GROUP BY`, the `ORDER BY` expression can point _only_ to the columns used for grouping. + +For example, the following query sorts by an arbitrary input field (`page_count`): + +[source,js] +-------------------------------------------------- +POST /_xpack/sql?format=txt +{ + "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +which results in something like: + +[source,text] +-------------------------------------------------- + author | name | page_count | release_date +-----------------+--------------------+---------------+------------------------ +Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00.000Z +Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00.000Z +Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z +Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z +James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] +// TESTRESPONSE[_cat] + +[[sql-syntax-order-by-score]] +==== Order By Score + +When doing full-text queries in the `WHERE` clause, results can be returned based on their +{defguide}/relevance-intro.html[score] or _relevance_ to the given query. + +NOTE: When doing multiple text queries in the `WHERE` clause, their scores will be +combined using the same rules as {es}'s +<<query-dsl-bool-query,`bool` query>>. + +To sort based on the `score`, use the special function `SCORE()`: + +[source,js] +-------------------------------------------------- +POST /_xpack/sql?format=txt +{ + "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which results in something like: + +[source,text] +-------------------------------------------------- + SCORE() | author | name | page_count | release_date +---------------+---------------+-------------------+---------------+------------------------ +2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z +1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z +1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z +1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/] +// TESTRESPONSE[_cat] + +Note that you can return `SCORE()` by adding it to the select list.
This +is possible even if you are not sorting by `SCORE()`: + +[source,js] +-------------------------------------------------- +POST /_xpack/sql?format=txt +{ + "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +[source,text] +-------------------------------------------------- + SCORE() | author | name | page_count | release_date +---------------+---------------+-------------------+---------------+------------------------ +2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z +1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z +1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z +1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/] +// TESTRESPONSE[_cat] + +NOTE: +Trying to return `score` from a non full-text query will return the same value for all results, as +all are equally relevant. + +[[sql-syntax-limit]] +[float] +==== LIMIT + +The `LIMIT` clause restricts (limits) the number of rows returned, using the format: + +[source, sql] +---- +LIMIT ( count | ALL ) +---- + +where: + +count:: is a positive integer or zero indicating the maximum *possible* number of results being returned (as there might be fewer matches than the limit). If `0` is specified, no results are returned. + +ALL:: indicates there is no limit and thus all results are returned. diff --git a/docs/reference/sql/language/syntax/show-columns.asciidoc b/docs/reference/sql/language/syntax/show-columns.asciidoc new file mode 100644 index 0000000000000..a52c744f17a97 --- /dev/null +++ b/docs/reference/sql/language/syntax/show-columns.asciidoc @@ -0,0 +1,14 @@ +[role="xpack"] +[testenv="basic"] +[[sql-syntax-show-columns]] +=== SHOW COLUMNS + +.Synopsis +[source, sql] +---- +SHOW COLUMNS [ FROM | IN ]? table +---- + +.Description + +List the columns in a table and their data type (and other attributes). diff --git a/docs/reference/sql/language/syntax/show-functions.asciidoc b/docs/reference/sql/language/syntax/show-functions.asciidoc new file mode 100644 index 0000000000000..964cdf39081c6 --- /dev/null +++ b/docs/reference/sql/language/syntax/show-functions.asciidoc @@ -0,0 +1,16 @@ +[role="xpack"] +[testenv="basic"] +[[sql-syntax-show-functions]] +=== SHOW FUNCTIONS + +.Synopsis +[source, sql] +---- +SHOW FUNCTIONS [ LIKE? pattern<1>? ]? +---- + +<1> SQL match pattern + +.Description + +List all the SQL functions and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern. diff --git a/docs/reference/sql/language/syntax/show-tables.asciidoc b/docs/reference/sql/language/syntax/show-tables.asciidoc new file mode 100644 index 0000000000000..7772c39c6fc21 --- /dev/null +++ b/docs/reference/sql/language/syntax/show-tables.asciidoc @@ -0,0 +1,16 @@ +[role="xpack"] +[testenv="basic"] +[[sql-syntax-show-tables]] +=== SHOW TABLES + +.Synopsis +[source, sql] +---- +SHOW TABLES [ LIKE? pattern<1>? ]? +---- + +<1> SQL match pattern + +.Description + +List the tables available to the current user and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern.
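As a short usage sketch of the `LIKE` pattern (the pattern below is hypothetical, for illustration only):

[source, sql]
----
SHOW TABLES LIKE 'lib%'
----

which would list only the tables whose names start with `lib`, such as the `library` index used throughout these examples.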
diff --git a/docs/reference/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc index b4b93d92a13d4..a72f5ca61feb5 100644 --- a/docs/reference/sql/overview.asciidoc +++ b/docs/reference/sql/overview.asciidoc @@ -6,6 +6,7 @@ {es-sql} aims to provide a powerful yet lightweight SQL interface to {es}. [[sql-introduction]] +[float] === Introduction {es-sql} is an X-Pack component that allows SQL-like queries to be executed in real-time against {es}. @@ -14,6 +15,7 @@ _natively_ inside {es}. One can think of {es-sql} as a _translator_, one that understands both SQL and {es} and makes it easy to read and process data in real-time, at scale by leveraging {es} capabilities. [[sql-why]] +[float] === Why {es-sql} ? Native integration:: diff --git a/docs/reference/sql/security.asciidoc b/docs/reference/sql/security.asciidoc new file mode 100644 index 0000000000000..64f554f023195 --- /dev/null +++ b/docs/reference/sql/security.asciidoc @@ -0,0 +1,39 @@ +[role="xpack"] +[testenv="basic"] +[[sql-security]] +== Security + +{es-sql} integrates with security, if this is enabled on your cluster. +In such a scenario, {es-sql} supports both security at the transport layer (by encrypting the communication between the consumer and the server) and authentication (for the access layer). + +[float] +==== SSL/TLS configuration + +In case of an encrypted transport, the SSL/TLS support needs to be enabled in {es-sql} to properly establish communication with {es}. This is done by setting the `ssl` property to `true` or by using the `https` prefix in the URL. + +Depending on your SSL configuration (whether the certificates are signed by a CA or not, whether they are global at JVM level or just local to one application), you might need to set up the `keystore` and/or `truststore`, that is, where the _credentials_ are stored (the `keystore`, which typically stores private keys and certificates) and how to _verify_ them (the `truststore`, which typically stores certificates from third parties, also known as CAs or certificate authorities). + +Typically (and again, do note that your environment might differ significantly), if the SSL setup for {es-sql} is not already done at the JVM level, one needs to set up the `keystore` if {es-sql} security requires client authentication (PKI - Public Key Infrastructure), and set up the `truststore` if SSL is enabled. + +[float] +==== Authentication + +The authentication support in {es-sql} is of two types: + +Username/Password:: Set these through the `user` and `password` properties. +PKI/X.509:: Use X.509 certificates to authenticate {es-sql} to {es}. For this, one would need to set up the `keystore` containing the private key and certificate to the appropriate user (configured in {es}) and the `truststore` with the CA certificate used to sign the SSL/TLS certificates in the {es} cluster. That is, one should set up the key to authenticate {es-sql} and also verify that it is the right one. To do so, one should set the `ssl.keystore.location` and `ssl.truststore.location` properties to indicate the `keystore` and `truststore` to use. It is recommended to have these secured through a password, in which case the `ssl.keystore.pass` and `ssl.truststore.pass` properties are required. + +[float] +[[sql-security-permissions]] +==== Permissions (server-side) +Lastly, on the server one needs to add a few permissions to +users so they can run SQL. To run SQL a user needs `read` and +`indices:admin/get` permissions at minimum, while some parts of +the API require `cluster:monitor/main`.
+ +The following example configures a role that can run SQL via JDBC, querying the `test` and `bort` +indices: + +["source","yaml",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{sql-tests}/security/roles.yml[cli_jdbc] +-------------------------------------------------- + diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java index 2a1046acb9cdb..1c64fdb7408ef 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java @@ -30,6 +30,7 @@ import java.util.Set; import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; +import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty; import static org.elasticsearch.ingest.ConfigurationUtils.readMap; import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; @@ -47,16 +48,28 @@ public final class ForEachProcessor extends AbstractProcessor { private final String field; private final Processor processor; + private final boolean ignoreMissing; - ForEachProcessor(String tag, String field, Processor processor) { + ForEachProcessor(String tag, String field, Processor processor, boolean ignoreMissing) { super(tag); this.field = field; this.processor = processor; + this.ignoreMissing = ignoreMissing; + } + + boolean isIgnoreMissing() { + return ignoreMissing; } @Override public void execute(IngestDocument ingestDocument) throws Exception { - List values = ingestDocument.getFieldValue(field, List.class); + List values = ingestDocument.getFieldValue(field, List.class, ignoreMissing); + if (values == null) { + if (ignoreMissing) { + return; + } + throw new IllegalArgumentException("field [" + field + "] is null, cannot loop over its elements."); + } List<Object> newValues = new ArrayList<>(values.size()); for (Object value : values) { Object previousValue = ingestDocument.getIngestMetadata().put("_value", value); @@ -87,6 +100,7 @@ public static final class Factory implements Processor.Factory { public ForEachProcessor create(Map<String, Processor.Factory> factories, String tag, Map<String, Object> config) throws Exception { String field = readStringProperty(TYPE, tag, config, "field"); + boolean ignoreMissing = readBooleanProperty(TYPE, tag, config, "ignore_missing", false); Map<String, Map<String, Object>> processorConfig = readMap(TYPE, tag, config, "processor"); Set<Map.Entry<String, Map<String, Object>>> entries = processorConfig.entrySet(); if (entries.size() != 1) { @@ -94,7 +108,7 @@ public ForEachProcessor create(Map<String, Processor.Factory> factories, String } Map.Entry<String, Map<String, Object>> entry = entries.iterator().next(); Processor processor = ConfigurationUtils.readProcessor(factories, entry.getKey(), entry.getValue()); - return new ForEachProcessor(tag, field, processor); + return new ForEachProcessor(tag, field, processor, ignoreMissing); } } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java index 49611d76f4081..f382ad8dcfb6a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java @@ -46,6 +46,24 @@ public void testCreate() throws Exception { assertThat(forEachProcessor,
Matchers.notNullValue()); assertThat(forEachProcessor.getField(), equalTo("_field")); assertThat(forEachProcessor.getProcessor(), Matchers.sameInstance(processor)); + assertFalse(forEachProcessor.isIgnoreMissing()); + } + + public void testSetIgnoreMissing() throws Exception { + Processor processor = new TestProcessor(ingestDocument -> { }); + Map registry = new HashMap<>(); + registry.put("_name", (r, t, c) -> processor); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + + Map config = new HashMap<>(); + config.put("field", "_field"); + config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); + config.put("ignore_missing", true); + ForEachProcessor forEachProcessor = forEachFactory.create(registry, null, config); + assertThat(forEachProcessor, Matchers.notNullValue()); + assertThat(forEachProcessor.getField(), equalTo("_field")); + assertThat(forEachProcessor.getProcessor(), Matchers.sameInstance(processor)); + assertTrue(forEachProcessor.isIgnoreMissing()); } public void testCreateWithTooManyProcessorTypes() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java index 95c25bedb6280..817e254ca9c30 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java @@ -19,14 +19,6 @@ package org.elasticsearch.ingest.common; -import org.elasticsearch.ingest.CompoundProcessor; -import org.elasticsearch.ingest.IngestDocument; -import org.elasticsearch.ingest.Processor; -import org.elasticsearch.ingest.TestProcessor; -import org.elasticsearch.ingest.TestTemplateService; -import org.elasticsearch.script.TemplateScript; -import org.elasticsearch.test.ESTestCase; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -34,7 +26,15 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import org.elasticsearch.ingest.CompoundProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.ingest.TestTemplateService; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; import static org.hamcrest.Matchers.equalTo; public class ForEachProcessorTests extends ESTestCase { @@ -49,7 +49,8 @@ public void testExecute() throws Exception { ); ForEachProcessor processor = new ForEachProcessor( - "_tag", "values", new UppercaseProcessor("_tag", "_ingest._value", false, "_ingest._value") + "_tag", "values", new UppercaseProcessor("_tag", "_ingest._value", false, "_ingest._value"), + false ); processor.execute(ingestDocument); @@ -69,7 +70,7 @@ public void testExecuteWithFailure() throws Exception { throw new RuntimeException("failure"); } }); - ForEachProcessor processor = new ForEachProcessor("_tag", "values", testProcessor); + ForEachProcessor processor = new ForEachProcessor("_tag", "values", testProcessor, false); try { processor.execute(ingestDocument); fail("exception expected"); @@ -89,7 +90,8 @@ public void testExecuteWithFailure() throws Exception { }); Processor onFailureProcessor = new TestProcessor(ingestDocument1 -> {}); processor = new ForEachProcessor( - 
"_tag", "values", new CompoundProcessor(false, Arrays.asList(testProcessor), Arrays.asList(onFailureProcessor)) + "_tag", "values", new CompoundProcessor(false, Arrays.asList(testProcessor), Arrays.asList(onFailureProcessor)), + false ); processor.execute(ingestDocument); assertThat(testProcessor.getInvokedCounter(), equalTo(3)); @@ -109,7 +111,7 @@ public void testMetaDataAvailable() throws Exception { id.setFieldValue("_ingest._value.type", id.getSourceAndMetadata().get("_type")); id.setFieldValue("_ingest._value.id", id.getSourceAndMetadata().get("_id")); }); - ForEachProcessor processor = new ForEachProcessor("_tag", "values", innerProcessor); + ForEachProcessor processor = new ForEachProcessor("_tag", "values", innerProcessor, false); processor.execute(ingestDocument); assertThat(innerProcessor.getInvokedCounter(), equalTo(2)); @@ -137,7 +139,7 @@ public void testRestOfTheDocumentIsAvailable() throws Exception { ForEachProcessor processor = new ForEachProcessor( "_tag", "values", new SetProcessor("_tag", new TestTemplateService.MockTemplateScript.Factory("_ingest._value.new_field"), - (model) -> model.get("other"))); + (model) -> model.get("other")), false); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("values.0.new_field", String.class), equalTo("value")); @@ -174,7 +176,7 @@ public String getTag() { "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) ); - ForEachProcessor processor = new ForEachProcessor("_tag", "values", innerProcessor); + ForEachProcessor processor = new ForEachProcessor("_tag", "values", innerProcessor, false); processor.execute(ingestDocument); @SuppressWarnings("unchecked") List result = ingestDocument.getFieldValue("values", List.class); @@ -199,7 +201,7 @@ public void testModifyFieldsOutsideArray() throws Exception { "_tag", "values", new CompoundProcessor(false, Collections.singletonList(new UppercaseProcessor("_tag_upper", "_ingest._value", false, "_ingest._value")), Collections.singletonList(new AppendProcessor("_tag", template, (model) -> (Collections.singletonList("added")))) - )); + ), false); processor.execute(ingestDocument); List result = ingestDocument.getFieldValue("values", List.class); @@ -225,7 +227,7 @@ public void testScalarValueAllowsUnderscoreValueFieldToRemainAccessible() throws TestProcessor processor = new TestProcessor(doc -> doc.setFieldValue("_ingest._value", doc.getFieldValue("_source._value", String.class))); - ForEachProcessor forEachProcessor = new ForEachProcessor("_tag", "values", processor); + ForEachProcessor forEachProcessor = new ForEachProcessor("_tag", "values", processor, false); forEachProcessor.execute(ingestDocument); List result = ingestDocument.getFieldValue("values", List.class); @@ -258,7 +260,7 @@ public void testNestedForEach() throws Exception { doc -> doc.setFieldValue("_ingest._value", doc.getFieldValue("_ingest._value", String.class).toUpperCase(Locale.ENGLISH)) ); ForEachProcessor processor = new ForEachProcessor( - "_tag", "values1", new ForEachProcessor("_tag", "_ingest._value.values2", testProcessor)); + "_tag", "values1", new ForEachProcessor("_tag", "_ingest._value.values2", testProcessor, false), false); processor.execute(ingestDocument); List result = ingestDocument.getFieldValue("values1.0.values2", List.class); @@ -270,4 +272,16 @@ public void testNestedForEach() throws Exception { assertThat(result.get(1), equalTo("JKL")); } + public void testIgnoreMissing() throws Exception { + IngestDocument originalIngestDocument = new 
IngestDocument( + "_index", "_type", "_id", null, null, null, null, Collections.emptyMap() + ); + IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); + TestProcessor testProcessor = new TestProcessor(doc -> {}); + ForEachProcessor processor = new ForEachProcessor("_tag", "_ingest._value", testProcessor, true); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); + assertThat(testProcessor.getInvokedCounter(), equalTo(0)); + } + } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 9719d15778b53..0fa331ba138f6 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.tasks.Task; import java.io.Closeable; import java.net.SocketAddress; @@ -74,7 +75,7 @@ static Collection returnHttpResponseBodies(Collection static Collection returnOpaqueIds(Collection responses) { List list = new ArrayList<>(responses.size()); for (HttpResponse response : responses) { - list.add(response.headers().get("X-Opaque-Id")); + list.add(response.headers().get(Task.X_OPAQUE_ID)); } return list; } diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 3bca078bd59c4..f000fdfeef5e0 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -23,8 +23,8 @@ esplugin { } versions << [ - 'tika': '1.17', - 'pdfbox': '2.0.8', + 'tika': '1.18', + 'pdfbox': '2.0.9', 'bouncycastle': '1.55', 'poi': '3.17', 'mime4j': '0.8.1' @@ -33,9 +33,10 @@ versions << [ dependencies { // mandatory for tika compile "org.apache.tika:tika-core:${versions.tika}" + // build against Jackson 2.9.5, but still works on our current version compile "org.apache.tika:tika-parsers:${versions.tika}" - compile 'org.tukaani:xz:1.6' - compile 'commons-io:commons-io:2.5' + compile 'org.tukaani:xz:1.8' + compile 'commons-io:commons-io:2.6' compile "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection @@ -62,7 +63,7 @@ dependencies { // MS Office compile "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork - compile 'org.apache.commons:commons-compress:1.14' + compile 'org.apache.commons:commons-compress:1.16.1' // Outlook documents compile "org.apache.james:apache-mime4j-core:${versions.mime4j}" compile "org.apache.james:apache-mime4j-dom:${versions.mime4j}" @@ -118,6 +119,10 @@ thirdPartyAudit.excludes = [ 'com.drew.metadata.jpeg.JpegDirectory', 'com.github.junrar.Archive', 'com.github.junrar.rarfile.FileHeader', + 'com.github.luben.zstd.ZstdInputStream', + 'com.github.luben.zstd.ZstdOutputStream', + 'com.github.openjson.JSONArray', + 'com.github.openjson.JSONObject', 'com.google.common.reflect.TypeToken', 'com.google.gson.Gson', 'com.googlecode.mp4parser.DataSource', @@ -531,6 +536,7 @@ thirdPartyAudit.excludes = [ 'org.apache.commons.exec.PumpStreamHandler', 'org.apache.commons.exec.environment.EnvironmentUtils', 'org.apache.commons.lang.StringUtils', + 'org.apache.commons.lang.SystemUtils', 'org.apache.ctakes.typesystem.type.refsem.UmlsConcept', 
'org.apache.ctakes.typesystem.type.textsem.IdentifiedAnnotation', 'org.apache.cxf.jaxrs.client.WebClient', @@ -635,8 +641,6 @@ thirdPartyAudit.excludes = [ 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SignatureTimeStampList', 'org.etsi.uri.x01903.v14.ValidationDataType$Factory', 'org.etsi.uri.x01903.v14.ValidationDataType', - 'org.json.JSONArray', - 'org.json.JSONObject', 'org.json.simple.JSONArray', 'org.json.simple.JSONObject', 'org.json.simple.parser.JSONParser', diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 deleted file mode 100644 index a93cac2243e69..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.14.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b18320d668ab080758bf5383d6d8fcf750babce \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.16.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..93be07c90a41c --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.16.1.jar.sha1 @@ -0,0 +1 @@ +7b5cdabadb4cf12f5ee0f801399e70635583193f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.5.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.5.jar.sha1 deleted file mode 100644 index b7f1d93e89702..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2852e6e05fbb95076fc091f6d1780f1f8fe35e0f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.6.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.6.jar.sha1 new file mode 100644 index 0000000000000..75f7934c08267 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.6.jar.sha1 @@ -0,0 +1 @@ +815893df5f31da2ece4040fe0a12fd44b577afaf \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.8.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.8.jar.sha1 deleted file mode 100644 index f8abddbc755eb..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52f852fcfc7481d45efdffd224eb78b85981b17b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.9.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.9.jar.sha1 new file mode 100644 index 0000000000000..4ded3b5488825 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.9.jar.sha1 @@ -0,0 +1 @@ +f961f17ebdbc307e9055e3cf7c0e207f0895ae55 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.8.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.8.jar.sha1 deleted file mode 100644 index 1c346871e2119..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17bdf273d66f3afe41eedb9d3ab6a7b819c44a0c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.9.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.9.jar.sha1 new file mode 100644 index 0000000000000..9bf91e07976c2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.9.jar.sha1 @@ -0,0 +1 @@ +d0425578218624388f2ec84a0b3a11efd55df0f5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.17.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.17.jar.sha1 deleted file mode 100644 index 571314b3378da..0000000000000 --- 
a/plugins/ingest-attachment/licenses/tika-core-1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b450102c2aee98107474d2f92661d947b9cef183 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.18.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.18.jar.sha1 new file mode 100644 index 0000000000000..ef162f03439cc --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-1.18.jar.sha1 @@ -0,0 +1 @@ +69556697de96cf0b22df846e970dafd29866eee0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.17.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.17.jar.sha1 deleted file mode 100644 index c4487e4970f25..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4277c54fcaed542fbc8a0001fdb4c23baccc0132 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.18.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.18.jar.sha1 new file mode 100644 index 0000000000000..6441e8b64e7b7 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-1.18.jar.sha1 @@ -0,0 +1 @@ +7d9b6dea91d783165f3313d320d3aaaa9a4dfc13 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 deleted file mode 100644 index d91cd44c0b4d3..0000000000000 --- a/plugins/ingest-attachment/licenses/xz-1.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05b6f921f1810bdf90e25471968f741f87168b64 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-1.8.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.8.jar.sha1 new file mode 100644 index 0000000000000..7455feac7983b --- /dev/null +++ b/plugins/ingest-attachment/licenses/xz-1.8.jar.sha1 @@ -0,0 +1 @@ +c4f7d054303948eb6a4066194253886c8af07128 \ No newline at end of file diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java index 97ca1c0b19774..6606d1bc72727 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java @@ -159,6 +159,7 @@ static PermissionCollection getRestrictedPermissions() { perms.add(new SecurityPermission("putProviderProperty.BC")); perms.add(new SecurityPermission("insertProvider")); perms.add(new ReflectPermission("suppressAccessChecks")); + perms.add(new RuntimePermission("accessClassInPackage.sun.java2d.cmm.kcms")); // xmlbeans, use by POI, needs to get the context classloader perms.add(new RuntimePermission("getClassLoader")); // ZipFile needs accessDeclaredMembers on JDK 10; cf. 
https://bugs.openjdk.java.net/browse/JDK-8187485 diff --git a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy index 0cd359a99731b..bcc5eef3193d7 100644 --- a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy @@ -31,4 +31,6 @@ grant { permission java.lang.RuntimePermission "getClassLoader"; // ZipFile needs accessDeclaredMembers on Java 10 permission java.lang.RuntimePermission "accessDeclaredMembers"; + // PDFBox checks for the existence of this class + permission java.lang.RuntimePermission "accessClassInPackage.sun.java2d.cmm.kcms"; }; diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java index 598d3f4e8175c..654bc361f53ad 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java @@ -214,6 +214,12 @@ public void testAsciidocDocument() throws Exception { assertThat(attachmentData.get("content_type").toString(), containsString("text/plain")); } + // See (https://issues.apache.org/jira/browse/COMPRESS-432) for information + // about the issue that causes a zip file to hang in Tika versions prior to 1.18. + public void testZipFileDoesNotHang() { + expectThrows(Exception.class, () -> parseDocument("bad_tika.zip", processor)); + } + public void testParseAsBytesArray() throws Exception { String path = "/org/elasticsearch/ingest/attachment/test/sample-files/text-in-english.txt"; byte[] bytes; diff --git a/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/bad_tika.zip b/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/bad_tika.zip new file mode 100644 index 0000000000000..58ebd8411edce Binary files /dev/null and b/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/bad_tika.zip differ diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 21d7c2b76cd44..76fb4de0037ab 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -64,6 +64,8 @@ for (Version version : bwcVersions.wireCompatible) { clusterName = 'rolling-upgrade' setting 'repositories.url.allowed_urls', 'http://snapshot.test*' setting 'node.attr.gen', 'old' + // debug logging for testHistoryUUIDIsGenerated + setting 'logger.level', 'DEBUG' if (version.onOrAfter('5.3.0')) { setting 'http.content_type.required', 'true' } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index a68d1d84b4464..3d9b5e3a7a745 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.yaml.ObjectPath; import 
java.io.IOException; @@ -52,7 +53,7 @@ */ public class RecoveryIT extends AbstractRollingTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31291") + @TestLogging("_root:DEBUG") public void testHistoryUUIDIsGenerated() throws Exception { final String index = "index_history_uuid"; if (CLUSTER_TYPE == ClusterType.OLD) { diff --git a/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats index 59aaa3e8a072f..983344dc1ac76 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats @@ -72,6 +72,14 @@ setup() { [ "$status" -eq 1 ] } +@test "[DEB] temporarily remove java and ensure the install fails" { + move_java + run dpkg -i elasticsearch-oss-$(cat version).deb + output=$status + unmove_java + [ "$output" -eq 1 ] +} + @test "[DEB] install package" { dpkg -i elasticsearch-oss-$(cat version).deb } diff --git a/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats index 52347c7ef4e41..cb12d4b50e02b 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats @@ -71,6 +71,14 @@ setup() { [ "$status" -eq 1 ] } +@test "[RPM] temporarily remove java and ensure the install fails" { + move_java + run rpm -i elasticsearch-oss-$(cat version).rpm + output=$status + unmove_java + [ "$output" -eq 1 ] +} + @test "[RPM] install package" { rpm -i elasticsearch-oss-$(cat version).rpm } diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 8421fe571a5b9..72b3552184422 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -68,8 +68,11 @@ if [ ! -x "`which unzip 2>/dev/null`" ]; then fi if [ ! -x "`which java 2>/dev/null`" ]; then - echo "'java' command is mandatory to run the tests" - exit 1 + # there are some tests that move java temporarily + if [ ! 
-x "`command -v java.bak 2>/dev/null`" ]; then + echo "'java' command is mandatory to run the tests" + exit 1 + fi fi # Returns 0 if the 'dpkg' command is available @@ -525,3 +528,17 @@ file_privileges_for_user_from_umask() { echo $((0777 & ~$(sudo -E -u $user sh -c umask) & ~0111)) } + +# move java to simulate it not being in the path +move_java() { + which_java=`command -v java` + assert_file_exist $which_java + mv $which_java ${which_java}.bak +} + +# move java back to its original location +unmove_java() { + which_java=`command -v java.bak` + assert_file_exist $which_java + mv $which_java `dirname $which_java`/java +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml index cf4e5b56e786e..4d3abb292f467 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml @@ -13,3 +13,24 @@ indices.get_mapping: index: test_index +--- +"Index missing, ignore_unavailable=true": + - skip: + version: " - 6.99.99" + reason: ignore_unavailable was ignored in previous versions + - do: + indices.get_mapping: + index: test_index + ignore_unavailable: true + + - match: { '': {} } + +--- +"Index missing, ignore_unavailable=true, allow_no_indices=false": + - do: + catch: missing + indices.get_mapping: + index: test_index + ignore_unavailable: true + allow_no_indices: false + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml index 224cc3ec2254e..9dcfe501c8772 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml @@ -95,15 +95,25 @@ setup: --- "Get test-* with wildcard_expansion=none": - skip: - version: " - 5.99.99" - reason: this was a breaking change in 6.0 + version: " - 6.3.99" + reason: allow_no_indices (defaults to true) was ignored in previous versions + - do: + indices.get_mapping: + index: test-x* + expand_wildcards: none + - match: { '': {} } +--- +"Get test-* with wildcard_expansion=none allow_no_indices=false": + - skip: + version: " - 6.99.99" + reason: allow_no_indices was ignored in previous versions - do: catch: missing indices.get_mapping: index: test-x* expand_wildcards: none - + allow_no_indices: false --- "Get test-* with wildcard_expansion=open,closed": diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 7ddb39b6d6225..8186f1b935370 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -257,6 +257,7 @@ import org.elasticsearch.rest.action.admin.indices.RestFlushAction; import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; +import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import 
org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -316,6 +317,7 @@ import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; @@ -369,7 +371,7 @@ public ActionModule(boolean transportClient, Settings settings, IndexNameExpress destructiveOperations = new DestructiveOperations(settings, clusterSettings); Set headers = Stream.concat( actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()), - Stream.of("X-Opaque-Id") + Stream.of(Task.X_OPAQUE_ID) ).collect(Collectors.toSet()); UnaryOperator restWrapper = null; for (ActionPlugin plugin : actionPlugins) { @@ -556,6 +558,8 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRestoreSnapshotAction(settings, restController)); registerHandler.accept(new RestDeleteSnapshotAction(settings, restController)); registerHandler.accept(new RestSnapshotsStatusAction(settings, restController)); + + registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index 81b9812d61c5f..44a66f497c846 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -47,7 +48,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** Response object for {@link GetFieldMappingsRequest} API */ -public class GetFieldMappingsResponse extends ActionResponse implements ToXContentFragment { +public class GetFieldMappingsResponse extends ActionResponse implements ToXContentObject { private static final ParseField MAPPINGS = new ParseField("mappings"); @@ -111,6 +112,7 @@ public FieldMappingMetaData fieldMappings(String index, String type, String fiel @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); for (Map.Entry>> indexEntry : mappings.entrySet()) { builder.startObject(indexEntry.getKey()); builder.startObject(MAPPINGS.getPreferredName()); @@ -126,6 +128,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); builder.endObject(); } + builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResult.java 
b/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResult.java index c6252feea276c..f7f76a2bbca7d 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResult.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResult.java @@ -19,13 +19,18 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.ingest.IngestDocument; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + /** * Holds the end result of what a pipeline did to sample document provided via the simulate api. */ @@ -33,6 +38,33 @@ public final class SimulateDocumentBaseResult implements SimulateDocumentResult private final WriteableIngestDocument ingestDocument; private final Exception failure; + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "simulate_document_base_result", + true, + a -> { + if (a[1] == null) { + assert a[0] != null; + return new SimulateDocumentBaseResult(((WriteableIngestDocument)a[0]).getIngestDocument()); + } else { + assert a[0] == null; + return new SimulateDocumentBaseResult((ElasticsearchException)a[1]); + } + } + ); + static { + PARSER.declareObject( + optionalConstructorArg(), + WriteableIngestDocument.INGEST_DOC_PARSER, + new ParseField(WriteableIngestDocument.DOC_FIELD) + ); + PARSER.declareObject( + optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + new ParseField("error") + ); + } + public SimulateDocumentBaseResult(IngestDocument ingestDocument) { this.ingestDocument = new WriteableIngestDocument(ingestDocument); failure = null; @@ -89,4 +121,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + public static SimulateDocumentBaseResult fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java index 21e802981850c..099e238f2d25e 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java @@ -18,21 +18,38 @@ */ package org.elasticsearch.action.ingest; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + /** * Holds the result of what a pipeline did to a sample document via the simulate api, but instead of {@link SimulateDocumentBaseResult} * this result class holds the intermediate result each processor did to 
the sample document. */ public final class SimulateDocumentVerboseResult implements SimulateDocumentResult { + public static final String PROCESSOR_RESULT_FIELD = "processor_results"; private final List processorResults; + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "simulate_document_verbose_result", + true, + a -> new SimulateDocumentVerboseResult((List)a[0]) + ); + static { + PARSER.declareObjectArray(constructorArg(), SimulateProcessorResult.PARSER, new ParseField(PROCESSOR_RESULT_FIELD)); + } + public SimulateDocumentVerboseResult(List processorResults) { this.processorResults = processorResults; } @@ -63,7 +80,7 @@ public List getProcessorResults() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startArray("processor_results"); + builder.startArray(PROCESSOR_RESULT_FIELD); for (SimulateProcessorResult processorResult : processorResults) { processorResult.toXContent(builder, params); } @@ -71,4 +88,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + public static SimulateDocumentVerboseResult fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 205add8cc543b..53a7ec1d1f7c9 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -25,6 +25,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -42,7 +44,7 @@ import static org.elasticsearch.ingest.IngestDocument.MetaData; -public class SimulatePipelineRequest extends ActionRequest { +public class SimulatePipelineRequest extends ActionRequest implements ToXContentObject { private String id; private boolean verbose; @@ -122,6 +124,12 @@ public void writeTo(StreamOutput out) throws IOException { } } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.rawValue(source.streamInput(), xContentType); + return builder; + } + public static final class Fields { static final String PIPELINE = "pipeline"; static final String DOCS = "docs"; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index e9ea1a7750738..991e81a14553b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -19,22 +19,90 @@ package org.elasticsearch.action.ingest; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject { private String pipelineId; private boolean verbose; private List results; + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "simulate_pipeline_response", + true, + a -> { + List results = (List)a[0]; + boolean verbose = false; + if (results.size() > 0) { + if (results.get(0) instanceof SimulateDocumentVerboseResult) { + verbose = true; + } + } + return new SimulatePipelineResponse(null, verbose, results); + } + ); + static { + PARSER.declareObjectArray( + constructorArg(), + (parser, context) -> { + Token token = parser.currentToken(); + ensureExpectedToken(Token.START_OBJECT, token, parser::getTokenLocation); + SimulateDocumentResult result = null; + while ((token = parser.nextToken()) != Token.END_OBJECT) { + ensureExpectedToken(token, Token.FIELD_NAME, parser::getTokenLocation); + String fieldName = parser.currentName(); + token = parser.nextToken(); + if (token == Token.START_ARRAY) { + if (fieldName.equals(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)) { + List results = new ArrayList<>(); + while ((token = parser.nextToken()) == Token.START_OBJECT) { + results.add(SimulateProcessorResult.fromXContent(parser)); + } + ensureExpectedToken(Token.END_ARRAY, token, parser::getTokenLocation); + result = new SimulateDocumentVerboseResult(results); + } else { + parser.skipChildren(); + } + } else if (token.equals(Token.START_OBJECT)) { + switch (fieldName) { + case WriteableIngestDocument.DOC_FIELD: + result = new SimulateDocumentBaseResult( + WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() + ); + break; + case "error": + result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); + break; + default: + parser.skipChildren(); + break; + } + } // else it is a value skip it + } + assert result != null; + return result; + }, + new ParseField(Fields.DOCUMENTS)); + } + public SimulatePipelineResponse() { } @@ -98,6 +166,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public static SimulatePipelineResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + static final class Fields { static final String DOCUMENTS = "docs"; } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java index 386a00b391f3c..101ce7ec260e1 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java @@ -19,33 +19,91 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.ElasticsearchException; +import 
org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import java.io.IOException; -class SimulateProcessorResult implements Writeable, ToXContentObject { +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class SimulateProcessorResult implements Writeable, ToXContentObject { + + private static final String IGNORED_ERROR_FIELD = "ignored_error"; private final String processorTag; private final WriteableIngestDocument ingestDocument; private final Exception failure; - SimulateProcessorResult(String processorTag, IngestDocument ingestDocument, Exception failure) { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<ElasticsearchException, Void> IGNORED_ERROR_PARSER = + new ConstructingObjectParser<>( + "ignored_error_parser", + true, + a -> (ElasticsearchException)a[0] + ); + static { + IGNORED_ERROR_PARSER.declareObject( + constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + new ParseField("error") + ); + } + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<SimulateProcessorResult, Void> PARSER = + new ConstructingObjectParser<>( + "simulate_processor_result", + true, + a -> { + String processorTag = a[0] == null ? null : (String)a[0]; + IngestDocument document = a[1] == null ? null : ((WriteableIngestDocument)a[1]).getIngestDocument(); + Exception failure = null; + if (a[2] != null) { + failure = (ElasticsearchException)a[2]; + } else if (a[3] != null) { + failure = (ElasticsearchException)a[3]; + } + return new SimulateProcessorResult(processorTag, document, failure); + } + ); + static { + PARSER.declareString(optionalConstructorArg(), new ParseField(ConfigurationUtils.TAG_KEY)); + PARSER.declareObject( + optionalConstructorArg(), + WriteableIngestDocument.INGEST_DOC_PARSER, + new ParseField(WriteableIngestDocument.DOC_FIELD) + ); + PARSER.declareObject( + optionalConstructorArg(), + IGNORED_ERROR_PARSER, + new ParseField(IGNORED_ERROR_FIELD) + ); + PARSER.declareObject( + optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + new ParseField("error") + ); + } + + public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument, Exception failure) { this.processorTag = processorTag; this.ingestDocument = (ingestDocument == null) ?
null : new WriteableIngestDocument(ingestDocument); this.failure = failure; } - SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) { + public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) { this(processorTag, ingestDocument, null); } - SimulateProcessorResult(String processorTag, Exception failure) { + public SimulateProcessorResult(String processorTag, Exception failure) { this(processorTag, null, failure); } @@ -98,7 +156,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } if (failure != null && ingestDocument != null) { - builder.startObject("ignored_error"); + builder.startObject(IGNORED_ERROR_FIELD); ElasticsearchException.generateFailureXContent(builder, params, failure, true); builder.endObject(); } else if (failure != null) { @@ -112,4 +170,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + public static SimulateProcessorResult fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java index 87168cb7a9bba..06d32f54bc5c0 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java @@ -20,24 +20,95 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestDocument.MetaData; import java.io.IOException; import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.Date; +import java.util.HashMap; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + final class WriteableIngestDocument implements Writeable, ToXContentFragment { + static final String SOURCE_FIELD = "_source"; + static final String INGEST_FIELD = "_ingest"; + static final String DOC_FIELD = "doc"; private final IngestDocument ingestDocument; + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<WriteableIngestDocument, Void> INGEST_DOC_PARSER = + new ConstructingObjectParser<>( + "ingest_document", + true, + a -> { + HashMap<String, Object> sourceAndMetadata = new HashMap<>(); + sourceAndMetadata.put(MetaData.INDEX.getFieldName(), a[0]); + sourceAndMetadata.put(MetaData.TYPE.getFieldName(), a[1]); + sourceAndMetadata.put(MetaData.ID.getFieldName(), a[2]); + if (a[3] != null) { + sourceAndMetadata.put(MetaData.PARENT.getFieldName(), a[3]); + } + if (a[4] != null) { + sourceAndMetadata.put(MetaData.ROUTING.getFieldName(), a[4]); + } + if (a[5] != null) { + sourceAndMetadata.put(MetaData.VERSION.getFieldName(), a[5]); + } + if (a[6] != null) { +
sourceAndMetadata.put(MetaData.VERSION_TYPE.getFieldName(), a[6]); + } + sourceAndMetadata.putAll((Map<String, Object>)a[7]); + return new WriteableIngestDocument(new IngestDocument(sourceAndMetadata, (Map<String, Object>)a[8])); + } + ); + static { + INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(MetaData.INDEX.getFieldName())); + INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(MetaData.TYPE.getFieldName())); + INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(MetaData.ID.getFieldName())); + INGEST_DOC_PARSER.declareString(optionalConstructorArg(), new ParseField(MetaData.PARENT.getFieldName())); + INGEST_DOC_PARSER.declareString(optionalConstructorArg(), new ParseField(MetaData.ROUTING.getFieldName())); + INGEST_DOC_PARSER.declareLong(optionalConstructorArg(), new ParseField(MetaData.VERSION.getFieldName())); + INGEST_DOC_PARSER.declareString(optionalConstructorArg(), new ParseField(MetaData.VERSION_TYPE.getFieldName())); + INGEST_DOC_PARSER.declareObject(constructorArg(), (p, c) -> p.map(), new ParseField(SOURCE_FIELD)); + INGEST_DOC_PARSER.declareObject( + constructorArg(), + (p, c) -> { + Map<String, Object> ingestMap = p.map(); + ingestMap.computeIfPresent( + "timestamp", + (k, o) -> ZonedDateTime.parse((String)o) + ); + return ingestMap; + }, + new ParseField(INGEST_FIELD) + ); + } + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<WriteableIngestDocument, Void> PARSER = + new ConstructingObjectParser<>( + "writeable_ingest_document", + true, + a -> (WriteableIngestDocument)a[0] + ); + static { + PARSER.declareObject(constructorArg(), INGEST_DOC_PARSER, new ParseField(DOC_FIELD)); + } + WriteableIngestDocument(IngestDocument ingestDocument) { assert ingestDocument != null; this.ingestDocument = ingestDocument; @@ -67,19 +138,25 @@ IngestDocument getIngestDocument() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("doc"); - Map<IngestDocument.MetaData, Object> metadataMap = ingestDocument.extractMetadata(); + builder.startObject(DOC_FIELD); + Map<MetaData, Object> metadataMap = ingestDocument.getMetadata(); for (Map.Entry<MetaData, Object> metadata : metadataMap.entrySet()) { if (metadata.getValue() != null) { builder.field(metadata.getKey().getFieldName(), metadata.getValue().toString()); } } - builder.field("_source", ingestDocument.getSourceAndMetadata()); - builder.field("_ingest", ingestDocument.getIngestMetadata()); + Map<String, Object> source = IngestDocument.deepCopyMap(ingestDocument.getSourceAndMetadata()); + metadataMap.keySet().forEach(mD -> source.remove(mD.getFieldName())); + builder.field(SOURCE_FIELD, source); + builder.field(INGEST_FIELD, ingestDocument.getIngestMetadata()); builder.endObject(); return builder; } + public static WriteableIngestDocument fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java index d8bb04a1a39c3..497dc49198bfc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java @@ -157,9 +157,13 @@ public void computeAndValidateWriteIndex() { List<IndexMetaData> writeIndices = referenceIndexMetaDatas.stream() .filter(idxMeta -> Boolean.TRUE.equals(idxMeta.getAliases().get(aliasName).writeIndex())) .collect(Collectors.toList()); - if (referenceIndexMetaDatas.size() == 1) { -
writeIndex.set(referenceIndexMetaDatas.get(0)); - } else if (writeIndices.size() == 1) { + + if (writeIndices.isEmpty() && referenceIndexMetaDatas.size() == 1 + && referenceIndexMetaDatas.get(0).getAliases().get(aliasName).writeIndex() == null) { + writeIndices.add(referenceIndexMetaDatas.get(0)); + } + + if (writeIndices.size() == 1) { writeIndex.set(writeIndices.get(0)); } else if (writeIndices.size() > 1) { List<String> writeIndicesStrings = writeIndices.stream() diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index ba72561f0c145..490f3d680e428 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -489,7 +489,8 @@ private ThreadContextStruct putResponse(final String key, final String value, fi final List<String> existingValues = newResponseHeaders.get(key); if (existingValues != null) { final Set<String> existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet()); - assert existingValues.size() == existingUniqueValues.size(); + assert existingValues.size() == existingUniqueValues.size() : + "existing values: [" + existingValues + "], existing unique values [" + existingUniqueValues + "]"; if (existingUniqueValues.contains(uniqueValue.apply(value))) { return this; } diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index f3c5d07f1f2f4..10b4c4318a30e 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.tasks.Task; import java.util.Collections; import java.util.concurrent.TimeUnit; @@ -174,6 +175,11 @@ public String toString() { } else { sb.append("source[], "); } + if (context.getTask().getHeader(Task.X_OPAQUE_ID) != null) { + sb.append("id[").append(context.getTask().getHeader(Task.X_OPAQUE_ID)).append("], "); + } else { + sb.append("id[], "); + } return sb.toString(); } } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 38ba01557edd8..9d33981480e05 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1460,6 +1460,7 @@ public void bootstrapNewHistory() throws IOException { map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqno.maxSeqNo)); map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(seqno.maxSeqNo)); + logger.debug("bootstrap a new history_uuid [{}]", map); updateCommitData(writer, map); } finally { metadataLock.writeLock().unlock(); @@ -1511,6 +1512,7 @@ public boolean ensureIndexHas6xCommitTags() throws IOException { maps.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); } if (maps.isEmpty() == false) { + logger.debug("bootstrap 6.x commit tags [{}]", maps); updateCommitData(writer, maps); return true; } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java
index 89e945780c8f5..5a9683c481513 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -574,6 +574,17 @@ public Map<MetaData, Object> extractMetadata() { return metadataMap; } + /** + * Does the same thing as {@link #extractMetadata} but does not mutate the map. + */ + public Map<MetaData, Object> getMetadata() { + Map<MetaData, Object> metadataMap = new EnumMap<>(MetaData.class); + for (MetaData metaData : MetaData.values()) { + metadataMap.put(metaData, sourceAndMetadata.get(metaData.getFieldName())); + } + return metadataMap; + } + /** * Returns the available ingest metadata fields, by default only timestamp, but it is possible to set additional ones. * Use only for reading values, modify them instead using {@link #setFieldValue(String, Object)} and {@link #removeField(String)} @@ -592,7 +603,7 @@ public Map<String, Object> getSourceAndMetadata() { } @SuppressWarnings("unchecked") - private static Map<String, Object> deepCopyMap(Map<String, Object> source) { + public static Map<String, Object> deepCopyMap(Map<String, Object> source) { return (Map<String, Object>) deepCopy(source); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 43bc383dcc66a..9beafbecb4000 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -138,6 +138,7 @@ import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -448,7 +449,7 @@ protected Node(final Environment environment, Collection final Transport transport = networkModule.getTransportSupplier().get(); Set<String> taskHeaders = Stream.concat( pluginsService.filterPlugins(ActionPlugin.class).stream().flatMap(p -> p.getTaskHeaders().stream()), - Stream.of("X-Opaque-Id") + Stream.of(Task.X_OPAQUE_ID) ).collect(Collectors.toSet()); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 8a1e4e74e819e..b24729f50d5f4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -60,8 +60,6 @@ public class RestGetAliasesAction extends BaseRestHandler { public RestGetAliasesAction(final Settings settings, final RestController controller) { super(settings); - controller.registerHandler(GET, "/_alias", this); - controller.registerHandler(GET, "/_aliases", this); controller.registerHandler(GET, "/_alias/{name}", this); controller.registerHandler(HEAD, "/_alias/{name}", this); controller.registerHandler(GET, "/{index}/_alias", this); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllAliasesAction.java new file mode 100644 index 0000000000000..87cadbafd8321 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllAliasesAction.java
@@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestStatus.OK; + +/** + * The REST handler for retrieving all aliases + */ +public class RestGetAllAliasesAction extends BaseRestHandler { + + public RestGetAllAliasesAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(GET, "/_alias", this); + controller.registerHandler(GET, "/_aliases", this); + } + + @Override + public String getName() { + return "get_all_aliases_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final GetIndexRequest getIndexRequest = new GetIndexRequest(); + getIndexRequest.indices(Strings.EMPTY_ARRAY); + getIndexRequest.features(Feature.ALIASES); + getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); + getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); + getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); + return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { + + @Override + public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { + builder.startObject(); + { + for (final String index : response.indices()) { + builder.startObject(index); + { + writeAliases(response.aliases().get(index), builder, request); + } + builder.endObject(); + } + } + builder.endObject(); + + return new BytesRestResponse(OK, builder); + } + + private void writeAliases(final List aliases, final XContentBuilder builder, + final Params params) throws 
IOException { + builder.startObject("aliases"); + { + if (aliases != null) { + for (final AliasMetaData alias : aliases) { + AliasMetaData.Builder.toXContent(alias, builder, params); + } + } + } + builder.endObject(); + } + }); + } + +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java index ea68d9cc3c04f..c43f14dcddf26 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -81,9 +81,7 @@ public RestResponse buildResponse(GetFieldMappingsResponse response, XContentBui if (mappingsByIndex.isEmpty() && fields.length > 0) { status = NOT_FOUND; } - builder.startObject(); response.toXContent(builder, request); - builder.endObject(); return new BytesRestResponse(status, builder); } }); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 1d4e31d01caa1..571c8c12c5f33 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; @@ -88,14 +87,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC @Override public RestResponse buildResponse(final GetMappingsResponse response, final XContentBuilder builder) throws Exception { final ImmutableOpenMap> mappingsByIndex = response.getMappings(); - if (mappingsByIndex.isEmpty() && (indices.length != 0 || types.length != 0)) { - if (indices.length != 0 && types.length == 0) { - builder.close(); - return new BytesRestResponse(channel, new IndexNotFoundException(String.join(",", indices))); - } else { - builder.close(); - return new BytesRestResponse(channel, new TypeMissingException("_all", String.join(",", types))); - } + if (mappingsByIndex.isEmpty() && types.length != 0) { + builder.close(); + return new BytesRestResponse(channel, new TypeMissingException("_all", String.join(",", types))); } final Set typeNames = new HashSet<>(); diff --git a/server/src/main/java/org/elasticsearch/tasks/Task.java b/server/src/main/java/org/elasticsearch/tasks/Task.java index 9fd9019cd213c..f639846b418e3 100644 --- a/server/src/main/java/org/elasticsearch/tasks/Task.java +++ b/server/src/main/java/org/elasticsearch/tasks/Task.java @@ -34,6 +34,11 @@ */ public class Task { + /** + * The request header to mark tasks with specific ids + */ + public static final String X_OPAQUE_ID = "X-Opaque-Id"; + private final long id; private final String type; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 072a9a095b358..ee4c6979eccc0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -357,7 +357,7 @@ public void testSearchTaskDescriptions() { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); Map headers = new HashMap<>(); - headers.put("X-Opaque-Id", "my_id"); + headers.put(Task.X_OPAQUE_ID, "my_id"); headers.put("Foo-Header", "bar"); headers.put("Custom-Task-Header", "my_value"); assertSearchResponse( @@ -404,7 +404,7 @@ public void testSearchTaskHeaderLimit() { int maxSize = Math.toIntExact(SETTING_HTTP_MAX_HEADER_SIZE.getDefault(Settings.EMPTY).getBytes() / 2 + 1); Map headers = new HashMap<>(); - headers.put("X-Opaque-Id", "my_id"); + headers.put(Task.X_OPAQUE_ID, "my_id"); headers.put("Custom-Task-Header", randomAlphaOfLengthBetween(maxSize, maxSize + 100)); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -415,7 +415,7 @@ public void testSearchTaskHeaderLimit() { private void assertTaskHeaders(TaskInfo taskInfo) { assertThat(taskInfo.getHeaders().keySet(), hasSize(2)); - assertEquals("my_id", taskInfo.getHeaders().get("X-Opaque-Id")); + assertEquals("my_id", taskInfo.getHeaders().get(Task.X_OPAQUE_ID)); assertEquals("my_value", taskInfo.getHeaders().get("Custom-Task-Header")); } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResultTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResultTests.java new file mode 100644 index 0000000000000..bfa6c1eb9b8c3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentBaseResultTests.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.StringJoiner; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.elasticsearch.action.ingest.WriteableIngestDocumentTests.createRandomIngestDoc; + +public class SimulateDocumentBaseResultTests extends AbstractXContentTestCase { + + public void testSerialization() throws IOException { + boolean isFailure = randomBoolean(); + SimulateDocumentBaseResult simulateDocumentBaseResult = createTestInstance(isFailure); + + BytesStreamOutput out = new BytesStreamOutput(); + simulateDocumentBaseResult.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + SimulateDocumentBaseResult otherSimulateDocumentBaseResult = new SimulateDocumentBaseResult(streamInput); + + if (isFailure) { + assertThat(otherSimulateDocumentBaseResult.getIngestDocument(), equalTo(simulateDocumentBaseResult.getIngestDocument())); + assertThat(otherSimulateDocumentBaseResult.getFailure(), instanceOf(IllegalArgumentException.class)); + IllegalArgumentException e = (IllegalArgumentException) otherSimulateDocumentBaseResult.getFailure(); + assertThat(e.getMessage(), equalTo("test")); + } else { + assertIngestDocument(otherSimulateDocumentBaseResult.getIngestDocument(), simulateDocumentBaseResult.getIngestDocument()); + } + } + + static SimulateDocumentBaseResult createTestInstance(boolean isFailure) { + SimulateDocumentBaseResult simulateDocumentBaseResult; + if (isFailure) { + simulateDocumentBaseResult = new SimulateDocumentBaseResult(new IllegalArgumentException("test")); + } else { + IngestDocument ingestDocument = createRandomIngestDoc(); + simulateDocumentBaseResult = new SimulateDocumentBaseResult(ingestDocument); + } + return simulateDocumentBaseResult; + } + + private static SimulateDocumentBaseResult createTestInstanceWithFailures() { + return createTestInstance(randomBoolean()); + } + + @Override + protected SimulateDocumentBaseResult createTestInstance() { + return createTestInstance(false); + } + + @Override + protected SimulateDocumentBaseResult doParseInstance(XContentParser parser) { + return SimulateDocumentBaseResult.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // We cannot have random fields in the _source field and _ingest field + return field -> + field.contains( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.SOURCE_FIELD).toString() + ) || + field.contains( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.INGEST_FIELD).toString() + ); + } + + public static void assertEqualDocs(SimulateDocumentBaseResult response, SimulateDocumentBaseResult parsedResponse) { + assertEquals(response.getIngestDocument(), parsedResponse.getIngestDocument()); + if (response.getFailure() != null) { + 
assertNotNull(parsedResponse.getFailure()); + assertThat( + parsedResponse.getFailure().getMessage(), + containsString(response.getFailure().getMessage()) + ); + } else { + assertNull(parsedResponse.getFailure()); + } + } + + @Override + public void assertEqualInstances(SimulateDocumentBaseResult response, SimulateDocumentBaseResult parsedResponse) { + assertEqualDocs(response, parsedResponse); + } + + /** + * Test parsing {@link SimulateDocumentBaseResult} with inner failures as they don't support asserting on xcontent + * equivalence, given that exceptions are not parsed back as the same original class. We run the usual + * {@link AbstractXContentTestCase#testFromXContent()} without failures, and this other test with failures where + * we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = SimulateDocumentBaseResultTests::createTestInstanceWithFailures; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields(), + getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams()); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java deleted file mode 100644 index 83aad26f6a07b..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentSimpleResultTests.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.ingest.RandomDocumentPicks; -import org.elasticsearch.ingest.IngestDocument; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; - -public class SimulateDocumentSimpleResultTests extends ESTestCase { - - public void testSerialization() throws IOException { - boolean isFailure = randomBoolean(); - SimulateDocumentBaseResult simulateDocumentBaseResult; - if (isFailure) { - simulateDocumentBaseResult = new SimulateDocumentBaseResult(new IllegalArgumentException("test")); - } else { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); - simulateDocumentBaseResult = new SimulateDocumentBaseResult(ingestDocument); - } - - BytesStreamOutput out = new BytesStreamOutput(); - simulateDocumentBaseResult.writeTo(out); - StreamInput streamInput = out.bytes().streamInput(); - SimulateDocumentBaseResult otherSimulateDocumentBaseResult = new SimulateDocumentBaseResult(streamInput); - - if (isFailure) { - assertThat(otherSimulateDocumentBaseResult.getIngestDocument(), equalTo(simulateDocumentBaseResult.getIngestDocument())); - assertThat(otherSimulateDocumentBaseResult.getFailure(), instanceOf(IllegalArgumentException.class)); - IllegalArgumentException e = (IllegalArgumentException) otherSimulateDocumentBaseResult.getFailure(); - assertThat(e.getMessage(), equalTo("test")); - } else { - assertIngestDocument(otherSimulateDocumentBaseResult.getIngestDocument(), simulateDocumentBaseResult.getIngestDocument()); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java new file mode 100644 index 0000000000000..6b673c49efa0b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.ingest; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.StringJoiner; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class SimulateDocumentVerboseResultTests extends AbstractXContentTestCase { + + static SimulateDocumentVerboseResult createTestInstance(boolean withFailures) { + int numDocs = randomIntBetween(0, 5); + List results = new ArrayList<>(); + for (int i = 0; i getRandomFieldsExcludeFilter() { + // We cannot have random fields in the _source field and _ingest field + return field -> + field.contains( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.SOURCE_FIELD).toString() + ) || + field.contains( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.INGEST_FIELD).toString() + ); + } + + /** + * Test parsing {@link SimulateDocumentVerboseResult} with inner failures as they don't support asserting on xcontent + * equivalence, given that exceptions are not parsed back as the same original class. We run the usual + * {@link AbstractXContentTestCase#testFromXContent()} without failures, and this other test with failures where we + * disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = SimulateDocumentVerboseResultTests::createTestInstanceWithFailures; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields(), + getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams()); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java index be448a09db892..65f82ceacff59 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java @@ -21,57 +21,29 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.ingest.IngestDocument; -import org.elasticsearch.ingest.RandomDocumentPicks; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.StringJoiner; +import java.util.function.Predicate; +import java.util.function.Supplier; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; -public class SimulatePipelineResponseTests extends ESTestCase { +public class SimulatePipelineResponseTests extends AbstractXContentTestCase { public void testSerialization() throws IOException { boolean isVerbose = randomBoolean(); String id = 
randomBoolean() ? randomAlphaOfLengthBetween(1, 10) : null; - int numResults = randomIntBetween(1, 10); - List results = new ArrayList<>(numResults); - for (int i = 0; i < numResults; i++) { - boolean isFailure = randomBoolean(); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); - if (isVerbose) { - int numProcessors = randomIntBetween(1, 10); - List processorResults = new ArrayList<>(numProcessors); - for (int j = 0; j < numProcessors; j++) { - String processorTag = randomAlphaOfLengthBetween(1, 10); - SimulateProcessorResult processorResult; - if (isFailure) { - processorResult = new SimulateProcessorResult(processorTag, new IllegalArgumentException("test")); - } else { - processorResult = new SimulateProcessorResult(processorTag, ingestDocument); - } - processorResults.add(processorResult); - } - results.add(new SimulateDocumentVerboseResult(processorResults)); - } else { - results.add(new SimulateDocumentBaseResult(ingestDocument)); - SimulateDocumentBaseResult simulateDocumentBaseResult; - if (isFailure) { - simulateDocumentBaseResult = new SimulateDocumentBaseResult(new IllegalArgumentException("test")); - } else { - simulateDocumentBaseResult = new SimulateDocumentBaseResult(ingestDocument); - } - results.add(simulateDocumentBaseResult); - } - } - SimulatePipelineResponse response = new SimulatePipelineResponse(id, isVerbose, results); + SimulatePipelineResponse response = createInstance(id, isVerbose, true); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); @@ -120,4 +92,97 @@ public void testSerialization() throws IOException { } } } + + static SimulatePipelineResponse createInstance(String pipelineId, boolean isVerbose, boolean withFailure) { + int numResults = randomIntBetween(1, 5); + List results = new ArrayList<>(numResults); + for (int i = 0; i < numResults; i++) { + if (isVerbose) { + results.add( + SimulateDocumentVerboseResultTests.createTestInstance(withFailure) + ); + } else { + results.add( + SimulateDocumentBaseResultTests.createTestInstance(withFailure && randomBoolean()) + ); + } + } + return new SimulatePipelineResponse(pipelineId, isVerbose, results); + } + + private static SimulatePipelineResponse createTestInstanceWithFailures() { + boolean isVerbose = randomBoolean(); + return createInstance(null, isVerbose, false); + } + + @Override + protected SimulatePipelineResponse createTestInstance() { + boolean isVerbose = randomBoolean(); + // since the pipeline id is not serialized with XContent we set it to null for equality tests. 
+ // we test failures separately since comparing XContent is not possible with failures + return createInstance(null, isVerbose, false); + } + + @Override + protected SimulatePipelineResponse doParseInstance(XContentParser parser) { + return SimulatePipelineResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected void assertEqualInstances(SimulatePipelineResponse response, + SimulatePipelineResponse parsedResponse) { + assertEquals(response.getPipelineId(), parsedResponse.getPipelineId()); + assertEquals(response.isVerbose(), parsedResponse.isVerbose()); + assertEquals(response.getResults().size(), parsedResponse.getResults().size()); + for (int i=0; i < response.getResults().size(); i++) { + if (response.isVerbose()) { + assertThat(response.getResults().get(i), instanceOf(SimulateDocumentVerboseResult.class)); + assertThat(parsedResponse.getResults().get(i), instanceOf(SimulateDocumentVerboseResult.class)); + SimulateDocumentVerboseResult responseResult = (SimulateDocumentVerboseResult)response.getResults().get(i); + SimulateDocumentVerboseResult parsedResult = (SimulateDocumentVerboseResult)parsedResponse.getResults().get(i); + SimulateDocumentVerboseResultTests.assertEqualDocs(responseResult, parsedResult); + } else { + assertThat(response.getResults().get(i), instanceOf(SimulateDocumentBaseResult.class)); + assertThat(parsedResponse.getResults().get(i), instanceOf(SimulateDocumentBaseResult.class)); + SimulateDocumentBaseResult responseResult = (SimulateDocumentBaseResult)response.getResults().get(i); + SimulateDocumentBaseResult parsedResult = (SimulateDocumentBaseResult)parsedResponse.getResults().get(i); + SimulateDocumentBaseResultTests.assertEqualDocs(responseResult, parsedResult); + } + } + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // We cannot have random fields in the _source field and _ingest field + return field -> + field.contains( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.SOURCE_FIELD).toString() + ) || + field.contains( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.INGEST_FIELD).toString() + ); + } + + /** + * Test parsing {@link SimulatePipelineResponse} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. 
+ */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = SimulatePipelineResponseTests::createTestInstanceWithFailures; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields(), getShuffleFieldsExceptions(), + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java index 3014a1a4ae61d..2e0d6a75749bb 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateProcessorResultTests.java @@ -21,35 +21,29 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.ingest.IngestDocument; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.util.StringJoiner; +import java.util.function.Predicate; +import java.util.function.Supplier; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.elasticsearch.action.ingest.WriteableIngestDocumentTests.createRandomIngestDoc; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class SimulateProcessorResultTests extends ESTestCase { +public class SimulateProcessorResultTests extends AbstractXContentTestCase { public void testSerialization() throws IOException { - String processorTag = randomAlphaOfLengthBetween(1, 10); boolean isSuccessful = randomBoolean(); boolean isIgnoredException = randomBoolean(); - SimulateProcessorResult simulateProcessorResult; - if (isSuccessful) { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); - if (isIgnoredException) { - simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument, new IllegalArgumentException("test")); - } else { - simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument); - } - } else { - simulateProcessorResult = new SimulateProcessorResult(processorTag, new IllegalArgumentException("test")); - } + SimulateProcessorResult simulateProcessorResult = createTestInstance(isSuccessful, isIgnoredException); BytesStreamOutput out = new BytesStreamOutput(); simulateProcessorResult.writeTo(out); @@ -72,4 +66,96 @@ public void testSerialization() throws IOException { assertThat(e.getMessage(), equalTo("test")); } } + + static SimulateProcessorResult createTestInstance(boolean isSuccessful, + boolean isIgnoredException) { + String processorTag = randomAlphaOfLengthBetween(1, 10); + SimulateProcessorResult simulateProcessorResult; + if (isSuccessful) { + IngestDocument ingestDocument = createRandomIngestDoc(); + if (isIgnoredException) { + simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument, new 
IllegalArgumentException("test")); + } else { + simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument); + } + } else { + simulateProcessorResult = new SimulateProcessorResult(processorTag, new IllegalArgumentException("test")); + } + return simulateProcessorResult; + } + + private static SimulateProcessorResult createTestInstanceWithFailures() { + boolean isSuccessful = randomBoolean(); + boolean isIgnoredException = randomBoolean(); + return createTestInstance(isSuccessful, isIgnoredException); + } + + @Override + protected SimulateProcessorResult createTestInstance() { + // we test failures separately since comparing XContent is not possible with failures + return createTestInstance(true, false); + } + + @Override + protected SimulateProcessorResult doParseInstance(XContentParser parser) { + return SimulateProcessorResult.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // We cannot have random fields in the _source field and _ingest field + return field -> + field.startsWith( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.SOURCE_FIELD).toString() + ) || + field.startsWith( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.INGEST_FIELD).toString() + ); + } + + static void assertEqualProcessorResults(SimulateProcessorResult response, + SimulateProcessorResult parsedResponse) { + assertEquals(response.getProcessorTag(), parsedResponse.getProcessorTag()); + assertEquals(response.getIngestDocument(), parsedResponse.getIngestDocument()); + if (response.getFailure() != null ) { + assertNotNull(parsedResponse.getFailure()); + assertThat( + parsedResponse.getFailure().getMessage(), + containsString(response.getFailure().getMessage()) + ); + } else { + assertNull(parsedResponse.getFailure()); + } + } + + @Override + protected void assertEqualInstances(SimulateProcessorResult response, SimulateProcessorResult parsedResponse) { + assertEqualProcessorResults(response, parsedResponse); + } + + /** + * Test parsing {@link SimulateProcessorResult} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = SimulateProcessorResultTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, + getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java b/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java index 4d8e0f544c458..bc4589ff5d36c 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/WriteableIngestDocumentTests.java @@ -25,14 +25,19 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.IngestDocument; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.RandomObjects; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.StringJoiner; +import java.util.function.Predicate; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; @@ -40,7 +45,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -public class WriteableIngestDocumentTests extends ESTestCase { +public class WriteableIngestDocumentTests extends AbstractXContentTestCase { public void testEqualsAndHashcode() throws Exception { Map sourceAndMetadata = RandomDocumentPicks.randomSource(random()); @@ -147,4 +152,42 @@ public void testToXContent() throws IOException { IngestDocument serializedIngestDocument = new IngestDocument(toXContentSource, toXContentIngestMetadata); assertThat(serializedIngestDocument, equalTo(serializedIngestDocument)); } + + static IngestDocument createRandomIngestDoc() { + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference sourceBytes = RandomObjects.randomSource(random(), xContentType); + Map randomSource = XContentHelper.convertToMap(sourceBytes, false, xContentType).v2(); + return RandomDocumentPicks.randomIngestDocument(random(), randomSource); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected WriteableIngestDocument createTestInstance() { + return new WriteableIngestDocument(createRandomIngestDoc()); + } + + @Override + protected WriteableIngestDocument doParseInstance(XContentParser parser) { + return WriteableIngestDocument.fromXContent(parser); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // We cannot have random fields in the _source field and _ingest field + return field -> + field.startsWith( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + .add(WriteableIngestDocument.SOURCE_FIELD).toString() + ) || + field.startsWith( + new StringJoiner(".") + .add(WriteableIngestDocument.DOC_FIELD) + 
.add(WriteableIngestDocument.INGEST_FIELD).toString() + ); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java index 812dfd8f6f686..e1fbc47c4a022 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java @@ -136,8 +136,7 @@ public void testAddWriteOnlyWithNoExistingAliases() { ClusterState after = service.innerExecute(before, Arrays.asList( new AliasAction.Add("test", "alias", null, null, null, false))); assertFalse(after.metaData().index("test").getAliases().get("alias").writeIndex()); - assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), - equalTo(after.metaData().index("test"))); + assertNull(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex()); after = service.innerExecute(before, Arrays.asList( new AliasAction.Add("test", "alias", null, null, null, null))); diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 23d2f7bcafa96..adb7a087367d2 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index; import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; @@ -34,12 +35,15 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Collections; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -166,10 +170,12 @@ public void testSlowLogSearchContextPrinterToLog() throws IOException { SearchContext searchContext = createSearchContext(index); SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); searchContext.request().source(source); + searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); SearchSlowLog.SlowLogSearchContextPrinter p = new SearchSlowLog.SlowLogSearchContextPrinter(searchContext, 10); assertThat(p.toString(), startsWith("[foo][0]")); // Makes sure that output doesn't contain any new lines assertThat(p.toString(), not(containsString("\n"))); + assertThat(p.toString(), endsWith("id[my_id], ")); } public void testLevelSetting() { diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 5970e1121bdee..cee95d3a377ca 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ 
b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -222,9 +222,7 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { params.put("pretty", "true"); GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("index").setTypes("type").setFields("field1", "obj.subfield").get(); XContentBuilder responseBuilder = XContentFactory.jsonBuilder().prettyPrint(); - responseBuilder.startObject(); response.toXContent(responseBuilder, new ToXContent.MapParams(params)); - responseBuilder.endObject(); String responseStrings = Strings.toString(responseBuilder); @@ -236,9 +234,7 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { response = client().admin().indices().prepareGetFieldMappings("index").setTypes("type").setFields("field1", "obj.subfield").get(); responseBuilder = XContentFactory.jsonBuilder().prettyPrint().lfAtEnd(); - responseBuilder.startObject(); response.toXContent(responseBuilder, new ToXContent.MapParams(params)); - responseBuilder.endObject(); responseStrings = Strings.toString(responseBuilder); prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java index a509645495858..06eefb7ccba14 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java @@ -187,9 +187,9 @@ public static BytesReference randomSource(Random random, XContentType xContentTy * Randomly adds fields, objects, or arrays to the provided builder. The maximum depth is 5. */ private static void addFields(Random random, XContentBuilder builder, int minNumFields, int currentDepth) throws IOException { - int numFields = randomIntBetween(random, minNumFields, 10); + int numFields = randomIntBetween(random, minNumFields, 5); for (int i = 0; i < numFields; i++) { - if (currentDepth < 5 && random.nextBoolean()) { + if (currentDepth < 5 && random.nextInt(100) >= 70) { if (random.nextBoolean()) { builder.startObject(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10)); addFields(random, builder, minNumFields, currentDepth + 1); diff --git a/x-pack/docs/en/watcher/encrypting-data.asciidoc b/x-pack/docs/en/watcher/encrypting-data.asciidoc index 166ef6f14d760..9319c9f793870 100644 --- a/x-pack/docs/en/watcher/encrypting-data.asciidoc +++ b/x-pack/docs/en/watcher/encrypting-data.asciidoc @@ -6,6 +6,12 @@ information or details about your SMTP email service. You can encrypt this data by generating a key and adding some secure settings on each node in your cluster. +Every `password` field that is used in your watch within an HTTP basic +authentication block - for example within a webhook, an HTTP input, or when +using the reporting email attachment - is no longer stored as plain text. Also +be aware that there is no way to configure your own fields in a watch to be +encrypted. + To encrypt sensitive data in {watcher}: . Use the {ref}/syskeygen.html[elasticsearch-syskeygen] command to create a system key file.
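A caller-side note on the SimpleGetFieldMappingsIT hunk above: the test drops its manual startObject()/endObject() wrapping, which suggests that GetFieldMappingsResponse now emits its own enclosing object when serialized. A minimal sketch under that assumption (the response argument stands in for a real response, as in the test):

import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;

class FieldMappingsToJson {
    // Serialize the response directly: it is assumed to write its own
    // enclosing object now, so adding an outer startObject()/endObject()
    // pair would nest the content one level too deep.
    static String toJson(GetFieldMappingsResponse response) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        response.toXContent(builder, ToXContent.EMPTY_PARAMS);
        return Strings.toString(builder);
    }
}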
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 2c8c700fcf615..a96dc8ebb127a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -468,7 +468,7 @@ public void testHttpExporterShutdown() throws Exception { final MultiHttpResource resource = mock(MultiHttpResource.class); if (sniffer != null && rarely()) { - doThrow(randomFrom(new IOException("expected"), new RuntimeException("expected"))).when(sniffer).close(); + doThrow(new RuntimeException("expected")).when(sniffer).close(); } if (rarely()) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 3ba3611293fdc..5abd701ce4b2e 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -415,7 +415,11 @@ private QueryBuilder createBoundaryQuery(Map position) { DateHistoGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHisto(); String fieldName = dateHisto.getField(); String rollupFieldName = fieldName + "." + DateHistogramAggregationBuilder.NAME; - long lowerBound = position != null ? (long) position.get(rollupFieldName) : 0; + long lowerBound = 0L; + if (position != null) { + Number value = (Number) position.get(rollupFieldName); + lowerBound = value.longValue(); + } assert lowerBound <= maxBoundary; final RangeQueryBuilder query = new RangeQueryBuilder(fieldName) .gte(lowerBound) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml new file mode 100644 index 0000000000000..4bea2f655e624 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml @@ -0,0 +1,73 @@ +--- +"Test put watch api without version overwrites watch": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "actions": { + "logging": { + "logging": { + "text": "yaml test" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { watch.input.simple.foo: "bar" } + + # change the simple input fields, then ensure the old + # field does not exist on get + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "spam": "eggs" + } + }, + "actions": { + "logging": { + "logging": { + "text": "yaml test" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { watch.input.simple.spam: "eggs" } + - is_false: watch.input.simple.foo + diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index 482a9a839ce8c..7403e0778aec0 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -110,16 +110,6 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { return operation; } - // the watch status is -1, in case a watch has been freshly stored and this save - // watch operation does not stem from an execution - // we dont need to update the trigger service, when the watch has been updated as - // part of an execution, so we can exit early - boolean isWatchExecutionOperation = watch.status().version() != -1; - if (isWatchExecutionOperation) { - logger.debug("not updating trigger for watch [{}], watch has been updated as part of an execution", watch.id()); - return operation; - } - boolean shouldBeTriggered = shardAllocationConfiguration.shouldBeTriggered(watch.id()); if (shouldBeTriggered) { if (watch.status().state().isActive()) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java index 17495bcad1ceb..4d4b184e97305 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java @@ -7,6 +7,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; @@ -99,27 +101,45 @@ protected void masterOperation(PutWatchRequest request, ClusterState state, try (XContentBuilder builder = jsonBuilder()) { watch.toXContent(builder, DEFAULT_PARAMS); - UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()); - updateRequest.docAsUpsert(isUpdate == false); - updateRequest.version(request.getVersion()); - updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - updateRequest.doc(builder); + if (isUpdate) { + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()); + updateRequest.version(request.getVersion()); + updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + updateRequest.doc(builder); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest, - ActionListener.wrap(response -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest, + ActionListener.wrap(response -> { + boolean created = response.getResult() == DocWriteResponse.Result.CREATED; + if (shouldBeTriggeredLocally(request, watch)) { + triggerService.add(watch); + } + listener.onResponse(new PutWatchResponse(response.getId(), response.getVersion(), created)); + }, listener::onFailure), + client::update); + } else { + IndexRequest indexRequest = new IndexRequest(Watch.INDEX, Watch.DOC_TYPE, 
request.getId()); + indexRequest.source(builder); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, indexRequest, + ActionListener.wrap(response -> { boolean created = response.getResult() == DocWriteResponse.Result.CREATED; // if not yet in distributed mode (mixed 5/6 version in cluster), only trigger on the master node - if (localExecute(request) == false && - this.clusterService.state().nodes().isLocalNodeElectedMaster() && - watch.status().state().isActive()) { + if (shouldBeTriggeredLocally(request, watch)) { triggerService.add(watch); } listener.onResponse(new PutWatchResponse(response.getId(), response.getVersion(), created)); }, listener::onFailure), - client::update); + client::index); + } } } catch (Exception e) { listener.onFailure(e); } } + + private boolean shouldBeTriggeredLocally(PutWatchRequest request, Watch watch) { + return localExecute(request) == false && + this.clusterService.state().nodes().isLocalNodeElectedMaster() && + watch.status().state().isActive(); + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java index 0844f573cc041..b4cd630db128c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java @@ -167,6 +167,7 @@ public void testWatchesAreNotTriggeredOnNonMasterWhenNotDistributed() throws Exc public void testWatchesAreTriggeredOnMasterWhenNotDistributed() throws Exception { PutWatchRequest putWatchRequest = new PutWatchRequest(); putWatchRequest.setId("_id"); + putWatchRequest.setVersion(randomLongBetween(1, 100)); ClusterState clusterState = ClusterState.builder(new ClusterName("my_cluster")) .nodes(DiscoveryNodes.builder() diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle index 1daae6dc9f50a..1ef0a3a98ec6e 100644 --- a/x-pack/qa/core-rest-tests-with-security/build.gradle +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -15,7 +15,6 @@ integTestRunner { ['cat.aliases/10_basic/Empty cluster', 'index/10_with_id/Index with ID', 'indices.get_alias/10_basic/Get alias against closed indices', - 'indices.get_alias/20_empty/Check empty aliases when getting all aliases via /_alias', 'cat.templates/10_basic/No templates', 'cat.templates/10_basic/Sort templates', 'cat.templates/10_basic/Multiple template', diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 2cea5f462d8b6..5a42a0632d210 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -9,6 +9,7 @@ import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import 
org.elasticsearch.common.Booleans; @@ -30,6 +31,7 @@ import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.hamcrest.Matcher; import org.junit.Before; import java.io.IOException; @@ -38,6 +40,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -274,6 +277,71 @@ public void testWatcher() throws Exception { } } + /** + * Tests that a RollUp job created on an old cluster is correctly restarted after the upgrade. + */ + public void testRollupAfterRestart() throws Exception { + assumeTrue("Rollup can be tested from 6.3.0 onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); + if (runningAgainstOldCluster) { + final int numDocs = 59; + final int year = randomIntBetween(1970, 2018); + + // index documents for the rollup job + final StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < numDocs; i++) { + bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n"); + String date = String.format(Locale.ROOT, "%04d-01-01T00:%02d:00Z", year, i); + bulk.append("{\"timestamp\":\"").append(date).append("\",\"value\":").append(i).append("}\n"); + } + bulk.append("\r\n"); + + final Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.setJsonEntity(bulk.toString()); + client().performRequest(bulkRequest); + + // create the rollup job + final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-job-test"); + createRollupJobRequest.setJsonEntity("{" + + "\"index_pattern\":\"rollup-*\"," + + "\"rollup_index\":\"results-rollup\"," + + "\"cron\":\"*/30 * * * * ?\"," + + "\"page_size\":100," + + "\"groups\":{" + + " \"date_histogram\":{" + + " \"field\":\"timestamp\"," + + " \"interval\":\"5m\"" + + " }" + + "}," + + "\"metrics\":[" + + " {\"field\":\"value\",\"metrics\":[\"min\",\"max\",\"sum\"]}" + + "]" + + "}"); + + Map createRollupJobResponse = toMap(client().performRequest(createRollupJobRequest)); + assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + // start the rollup job + final Request startRollupJobRequest = new Request("POST", "_xpack/rollup/job/rollup-job-test/_start"); + Map startRollupJobResponse = toMap(client().performRequest(startRollupJobRequest)); + assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); + + assertRollUpJob("rollup-job-test"); + + } else { + + final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); + clusterHealthRequest.addParameter("wait_for_status", "yellow"); + clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); + if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); + } + Map clusterHealthResponse = toMap(client().performRequest(clusterHealthRequest)); + assertThat(clusterHealthResponse.get("timed_out"), equalTo(Boolean.FALSE)); + + assertRollUpJob("rollup-job-test"); + } + } + public void testSqlFailsOnIndexWithTwoTypes() throws IOException { // TODO this isn't going to trigger until we backport to 6.1 assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", @@ -526,4 +594,48 @@ private void assertRoleInfo(final String role) throws Exception {
assertNotNull(response.get("cluster")); assertNotNull(response.get("indices")); } + + @SuppressWarnings("unchecked") + private void assertRollUpJob(final String rollupJob) throws Exception { + final Matcher expectedStates = anyOf(equalTo("indexing"), equalTo("started")); + waitForRollUpJob(rollupJob, expectedStates); + + // check that the rollup job is started using the RollUp API + final Request getRollupJobRequest = new Request("GET", "_xpack/rollup/job/" + rollupJob); + Map getRollupJobResponse = toMap(client().performRequest(getRollupJobRequest)); + assertThat(ObjectPath.eval("jobs.0.status.job_state", getRollupJobResponse), expectedStates); + + // check that the rollup job is started using the Tasks API + final Request taskRequest = new Request("GET", "_tasks"); + taskRequest.addParameter("detailed", "true"); + taskRequest.addParameter("actions", "xpack/rollup/*"); + Map taskResponse = toMap(client().performRequest(taskRequest)); + Map taskResponseNodes = (Map) taskResponse.get("nodes"); + Map taskResponseNode = (Map) taskResponseNodes.values().iterator().next(); + Map taskResponseTasks = (Map) taskResponseNode.get("tasks"); + Map taskResponseStatus = (Map) taskResponseTasks.values().iterator().next(); + assertThat(ObjectPath.eval("status.job_state", taskResponseStatus), expectedStates); + + // check that the rollup job is started using the Cluster State API + final Request clusterStateRequest = new Request("GET", "_cluster/state/metadata"); + Map clusterStateResponse = toMap(client().performRequest(clusterStateRequest)); + Map rollupJobTask = ObjectPath.eval("metadata.persistent_tasks.tasks.0", clusterStateResponse); + assertThat(ObjectPath.eval("id", rollupJobTask), equalTo("rollup-job-test")); + + // Persistent task state field has been renamed in 6.4.0 from "status" to "state" + final String stateFieldName = (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_4_0)) ? "status" : "state"; + + final String jobStateField = "task.xpack/rollup/job." + stateFieldName + ".job_state"; + assertThat("Expected field [" + jobStateField + "] to be started or indexing in " + rollupJobTask, + ObjectPath.eval(jobStateField, rollupJobTask), expectedStates); + } + + private void waitForRollUpJob(final String rollupJob, final Matcher expectedStates) throws Exception { + assertBusy(() -> { + final Request getRollupJobRequest = new Request("GET", "_xpack/rollup/job/" + rollupJob); + Response getRollupJobResponse = client().performRequest(getRollupJobRequest); + assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + assertThat(ObjectPath.eval("jobs.0.status.job_state", toMap(getRollupJobResponse)), expectedStates); + }, 30L, TimeUnit.SECONDS); + } }
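A note on the RollupIndexer change above: a position map that has round-tripped through XContent may hold the lower bound as an Integer rather than a Long, so the old direct (long) cast could fail with a ClassCastException. A self-contained sketch of the failure mode and the fix (the field name is illustrative):

import java.util.HashMap;
import java.util.Map;

public class LowerBoundDemo {
    public static void main(String[] args) {
        Map<String, Object> position = new HashMap<>();
        // small numeric values parsed back from JSON arrive as Integer
        position.put("timestamp.date_histogram", 42);

        // old code: long lowerBound = (long) position.get("timestamp.date_histogram");
        // -> ClassCastException: Integer cannot be cast to Long

        // new code: widen through Number instead, which works for both types
        Number value = (Number) position.get("timestamp.date_histogram");
        long lowerBound = value.longValue();
        System.out.println(lowerBound); // prints 42
    }
}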
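The TransportPutWatchAction change and the new 90_ensure_watch_gets_overwritten_without_version.yml test are two sides of the same fix: an UpdateRequest merges the new document into the stored one, so a field removed from the watch (input.simple.foo in the test) used to survive a versionless put, while an IndexRequest replaces the document outright. A toy illustration of the two semantics with plain maps (not Elasticsearch code):

import java.util.HashMap;
import java.util.Map;

public class MergeVsReplaceDemo {
    public static void main(String[] args) {
        Map<String, Object> stored = new HashMap<>();
        stored.put("foo", "bar");                 // old watch input

        Map<String, Object> newDoc = new HashMap<>();
        newDoc.put("spam", "eggs");               // new watch input

        // UpdateRequest.doc(...) semantics: partial-document merge
        Map<String, Object> merged = new HashMap<>(stored);
        merged.putAll(newDoc);
        System.out.println(merged);               // {foo=bar, spam=eggs} - stale field survives

        // IndexRequest.source(...) semantics: full replacement
        Map<String, Object> replaced = new HashMap<>(newDoc);
        System.out.println(replaced);             // {spam=eggs} - matches the YAML expectation
    }
}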
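Finally, the restart-test assertions above rely on ObjectPath.eval to walk nested response maps with dot-separated paths, where a numeric segment indexes into a list. A small sketch against a hand-built map, assuming the org.elasticsearch.common.xcontent.ObjectPath variant that operates on plain maps (the response shape is reduced to just the asserted path):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.xcontent.ObjectPath;

public class ObjectPathDemo {
    public static void main(String[] args) {
        // shape of a GET _xpack/rollup/job response, reduced to the asserted path
        Map<String, Object> status = new HashMap<>();
        status.put("job_state", "started");
        Map<String, Object> job = new HashMap<>();
        job.put("status", status);
        Map<String, Object> response = new HashMap<>();
        response.put("jobs", Arrays.asList(job));

        // "jobs.0.status.job_state": key "jobs", list index 0, then nested keys
        String state = ObjectPath.eval("jobs.0.status.job_state", response);
        System.out.println(state); // prints "started"
    }
}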