expansions = [
'container': azureContainer,
'base_path': azureBasePath
]
+
processTestResources {
inputs.properties(expansions)
MavenFilteringHack.filter(it, expansions)
diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java
index 2f74c00ef92e2..f906b9fa9a913 100644
--- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java
+++ b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java
@@ -18,132 +18,332 @@
*/
package org.elasticsearch.repositories.azure;
-import com.sun.net.httpserver.HttpExchange;
-import com.sun.net.httpserver.HttpHandler;
-import com.sun.net.httpserver.HttpServer;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.mocksocket.MockHttpServer;
-import org.elasticsearch.repositories.azure.AzureStorageTestServer.Response;
+import org.elasticsearch.test.fixture.AbstractHttpFixture;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
-import java.io.ByteArrayOutputStream;
import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.SocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singleton;
-import static java.util.Collections.singletonList;
/**
- * {@link AzureStorageFixture} is a fixture that emulates an Azure Storage service.
+ * {@link AzureStorageFixture} emulates an Azure Storage service.
*
- * It starts an asynchronous socket server that binds to a random local port. The server parses
- * HTTP requests and uses a {@link AzureStorageTestServer} to handle them before returning
- * them to the client as HTTP responses.
+ * The implementation is based on official documentation available at
+ * https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api.
*/
-public class AzureStorageFixture {
+public class AzureStorageFixture extends AbstractHttpFixture {
- public static void main(String[] args) throws Exception {
+ /**
+ * List of the containers stored on this test server
+ **/
+    private final Map<String, Container> containers = ConcurrentCollections.newConcurrentMap();
+
+ /**
+ * Request handlers for the requests made by the Azure client
+ **/
+    private final PathTrie<RequestHandler> handlers;
+
+ /**
+     * Creates a {@link AzureStorageFixture} for the given working directory and container
+ */
+ private AzureStorageFixture(final String workingDir, final String container) {
+ super(workingDir);
+ this.containers.put(container, new Container(container));
+ this.handlers = defaultHandlers(containers);
+ }
+
+ @Override
+ protected AbstractHttpFixture.Response handle(final Request request) throws IOException {
+ final RequestHandler handler = handlers.retrieve(request.getMethod() + " " + request.getPath(), request.getParameters());
+ if (handler != null) {
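+            // Only requests that carry the expected test account in the Authorization header are served;
+            // anything else gets an Azure-style AccessDenied error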
+ final String authorization = request.getHeader("Authorization");
+ if (authorization == null
+ || (authorization.length() > 0 && authorization.contains("azure_integration_test_account") == false)) {
+ return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Access Denied");
+ }
+ return handler.handle(request);
+ }
+ return null;
+ }
+
+ public static void main(final String[] args) throws Exception {
if (args == null || args.length != 2) {
            throw new IllegalArgumentException("AzureStorageFixture <working directory> <container>");
}
- final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
- final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+ final AzureStorageFixture fixture = new AzureStorageFixture(args[0], args[1]);
+ fixture.listen();
+ }
+
+ /**
+ * Builds the default request handlers
+ **/
+    private static PathTrie<RequestHandler> defaultHandlers(final Map<String, Container> containers) {
+        final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
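+        // Handlers are registered under "METHOD /path" keys; placeholders such as {container} and {path0}
+        // are resolved into request parameters by the PathTrie when a request is dispatched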
+
+ // Get Blob Properties
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties
+ objectsPaths("HEAD /{container}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String containerName = request.getParam("container");
+
+ final Container container = containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(request.getId());
+ }
+
+ final String blobName = objectName(request.getParameters());
+                for (Map.Entry<String, byte[]> object : container.objects.entrySet()) {
+ if (object.getKey().equals(blobName)) {
+                        Map<String, String> responseHeaders = new HashMap<>();
+ responseHeaders.put("x-ms-blob-content-length", String.valueOf(object.getValue().length));
+ responseHeaders.put("x-ms-blob-type", "blockblob");
+ return new Response(RestStatus.OK.getStatus(), responseHeaders, EMPTY_BYTE);
+ }
+ }
+ return newBlobNotFoundError(request.getId());
+ })
+ );
- try {
- final Path workingDirectory = workingDir(args[0]);
- /// Writes the PID of the current Java process in a `pid` file located in the working directory
- writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+ // PUT Blob
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob
+ objectsPaths("PUT /{container}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String destContainerName = request.getParam("container");
+ final String destBlobName = objectName(request.getParameters());
- final String addressAndPort = addressToString(httpServer.getAddress());
- // Writes the address and port of the http server in a `ports` file located in the working directory
- writeFile(workingDirectory, "ports", addressAndPort);
+ final Container destContainer = containers.get(destContainerName);
+ if (destContainer == null) {
+ return newContainerNotFoundError(request.getId());
+ }
- // Emulates Azure
- final String storageUrl = "http://" + addressAndPort;
- final AzureStorageTestServer testServer = new AzureStorageTestServer(storageUrl);
- testServer.createContainer(args[1]);
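+                // putIfAbsent keeps any existing blob and reports a conflict instead of overwriting it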
+ byte[] existingBytes = destContainer.objects.putIfAbsent(destBlobName, request.getBody());
+ if (existingBytes != null) {
+ return newBlobAlreadyExistsError(request.getId());
+ }
- httpServer.createContext("/", new ResponseHandler(testServer));
- httpServer.start();
+ return new Response(RestStatus.CREATED.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE); })
+ );
- // Wait to be killed
- Thread.sleep(Long.MAX_VALUE);
+        // Get Blob
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob
+ objectsPaths("GET /{container}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String containerName = request.getParam("container");
+
+ final Container container = containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(request.getId());
+ }
+
+ final String blobName = objectName(request.getParameters());
+ if (container.objects.containsKey(blobName)) {
+                    Map<String, String> responseHeaders = new HashMap<>(contentType("application/octet-stream"));
+ responseHeaders.put("x-ms-copy-status", "success");
+ responseHeaders.put("x-ms-blob-type", "blockblob");
+ return new Response(RestStatus.OK.getStatus(), responseHeaders, container.objects.get(blobName));
+
+ }
+ return newBlobNotFoundError(request.getId());
+ })
+ );
+
+ // Delete Blob
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
+ objectsPaths("DELETE /{container}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String containerName = request.getParam("container");
+
+ final Container container = containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(request.getId());
+ }
+
+ final String blobName = objectName(request.getParameters());
+ if (container.objects.remove(blobName) != null) {
+ return new Response(RestStatus.ACCEPTED.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ }
+ return newBlobNotFoundError(request.getId());
+ })
+ );
+
+ // List Blobs
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs
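+        // Returns the container's blobs as an XML EnumerationResults document, optionally filtered by the "prefix" parameter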
+ handlers.insert("GET /{container}/", (request) -> {
+ final String containerName = request.getParam("container");
+
+ final Container container = containers.get(containerName);
+ if (container == null) {
+ return newContainerNotFoundError(request.getId());
+ }
+
+ final String prefix = request.getParam("prefix");
+ return newEnumerationResultsResponse(request.getId(), container, prefix);
+ });
+
+ // Get Container Properties
+ //
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
+ handlers.insert("HEAD /{container}", (request) -> {
+ String container = request.getParam("container");
+ if (Strings.hasText(container) && containers.containsKey(container)) {
+ return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ } else {
+ return newContainerNotFoundError(request.getId());
+ }
+ });
- } finally {
- httpServer.stop(0);
+ return handlers;
+ }
+
+ /**
+     * Represents an Azure Storage container.
+ */
+ static class Container {
+
+ /**
+ * Container name
+ **/
+ final String name;
+
+ /**
+ * Blobs contained in the container
+ **/
+        final Map<String, byte[]> objects;
+
+ Container(final String name) {
+ this.name = Objects.requireNonNull(name);
+ this.objects = ConcurrentCollections.newConcurrentMap();
}
}
- @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
- private static Path workingDir(final String dir) {
- return Paths.get(dir);
+    /**
+     * Expands a path like "PUT /{container}" into 10 derived paths:
+     * - PUT /{container}/{path0}
+     * - PUT /{container}/{path0}/{path1}
+     * - PUT /{container}/{path0}/{path1}/{path2}
+     * - etc
+     */
+    private static List<String> objectsPaths(final String path) {
+        final List<String> paths = new ArrayList<>();
+ String p = path;
+ for (int i = 0; i < 10; i++) {
+ p = p + "/{path" + i + "}";
+ paths.add(p);
+ }
+ return paths;
}
- private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
- final Path tempPidFile = Files.createTempFile(dir, null, null);
- Files.write(tempPidFile, singleton(content));
- Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+ /**
+ * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
+ *
+ * This is the counterpart of {@link #objectsPaths(String)}
+ */
+    private static String objectName(final Map<String, String> params) {
+ final StringBuilder name = new StringBuilder();
+ for (int i = 0; i < 10; i++) {
+ String value = params.getOrDefault("path" + i, null);
+ if (value != null) {
+ if (name.length() > 0) {
+ name.append('/');
+ }
+ name.append(value);
+ }
+ }
+ return name.toString();
}
- private static String addressToString(final SocketAddress address) {
- final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
- if (inetSocketAddress.getAddress() instanceof Inet6Address) {
- return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
+
+ /**
+ * Azure EnumerationResults Response
+ */
+ private static Response newEnumerationResultsResponse(final long requestId, final Container container, final String prefix) {
+ final String id = Long.toString(requestId);
+ final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<EnumerationResults>");
+ if (prefix != null) {
+            response.append("<Prefix>").append(prefix).append("</Prefix>");
} else {
- return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+            response.append("<Prefix/>");
+ }
+        response.append("<MaxResults>").append(container.objects.size()).append("</MaxResults>");
+        response.append("<Blobs>");
+
+ int count = 0;
+        for (Map.Entry<String, byte[]> object : container.objects.entrySet()) {
+ String objectName = object.getKey();
+ if (prefix == null || objectName.startsWith(prefix)) {
+                response.append("<Blob>");
+                response.append("<Name>").append(objectName).append("</Name>");
+                response.append("<Properties>");
+                response.append("<Content-Length>").append(object.getValue().length).append("</Content-Length>");
+                response.append("<CopyId>").append(count++).append("</CopyId>");
+                response.append("<CopyStatus>success</CopyStatus>");
+                response.append("<BlobType>BlockBlob</BlobType>");
+                response.append("</Properties>");
+                response.append("</Blob>");
+ }
}
+
+        response.append("</Blobs>");
+        response.append("<NextMarker/>");
+        response.append("</EnumerationResults>");
+
+        final Map<String, String> headers = new HashMap<>(contentType("application/xml"));
+ headers.put("x-ms-request-id", id);
+
+ return new Response(RestStatus.OK.getStatus(), headers, response.toString().getBytes(UTF_8));
}
- static class ResponseHandler implements HttpHandler {
+ private static Response newContainerNotFoundError(final long requestId) {
+ return newError(requestId, RestStatus.NOT_FOUND, "ContainerNotFound", "The specified container does not exist");
+ }
- private final AzureStorageTestServer server;
+ private static Response newBlobNotFoundError(final long requestId) {
+ return newError(requestId, RestStatus.NOT_FOUND, "BlobNotFound", "The specified blob does not exist");
+ }
- private ResponseHandler(final AzureStorageTestServer server) {
- this.server = server;
- }
+ private static Response newBlobAlreadyExistsError(final long requestId) {
+ return newError(requestId, RestStatus.CONFLICT, "BlobAlreadyExists", "The specified blob already exists");
+ }
- @Override
- public void handle(HttpExchange exchange) throws IOException {
- String method = exchange.getRequestMethod();
- String path = server.getEndpoint() + exchange.getRequestURI().getRawPath();
- String query = exchange.getRequestURI().getRawQuery();
- Map> headers = exchange.getRequestHeaders();
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- Streams.copy(exchange.getRequestBody(), out);
-
- Response response = null;
-
- final String userAgent = exchange.getRequestHeaders().getFirst("User-Agent");
- if (userAgent != null && userAgent.startsWith("Apache Ant")) {
- // This is a request made by the AntFixture, just reply "OK"
- response = new Response(RestStatus.OK, emptyMap(), "text/plain; charset=utf-8", "OK".getBytes(UTF_8));
- } else {
- // Otherwise simulate a S3 response
- response = server.handle(method, path, query, headers, out.toByteArray());
- }
+ /**
+ * Azure Error
+ *
+ * https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+ */
+ private static Response newError(final long requestId,
+ final RestStatus status,
+ final String code,
+ final String message) {
- Map> responseHeaders = exchange.getResponseHeaders();
- responseHeaders.put("Content-Type", singletonList(response.contentType));
- response.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
- exchange.sendResponseHeaders(response.status.getStatus(), response.body.length);
- if (response.body.length > 0) {
- exchange.getResponseBody().write(response.body);
- }
- exchange.close();
- }
+ final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<Error>");
+        response.append("<Code>").append(code).append("</Code>");
+        response.append("<Message>").append(message).append("</Message>");
+        response.append("</Error>");
+
+        final Map<String, String> headers = new HashMap<>(contentType("application/xml"));
+ headers.put("x-ms-request-id", String.valueOf(requestId));
+ headers.put("x-ms-error-code", code);
+
+ return new Response(status.getStatus(), headers, response.toString().getBytes(UTF_8));
}
}
diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java
deleted file mode 100644
index 8183ee5043ec8..0000000000000
--- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.repositories.azure;
-
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.path.PathTrie;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.rest.RestUtils;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singletonMap;
-
-/**
- * {@link AzureStorageTestServer} emulates an Azure Storage service through a {@link #handle(String, String, String, Map, byte[])}
- * method that provides appropriate responses for specific requests like the real Azure platform would do.
- * It is based on official documentation available at https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api.
- */
-public class AzureStorageTestServer {
-
- private static byte[] EMPTY_BYTE = new byte[0];
-
- /** List of the containers stored on this test server **/
-    private final Map<String, Container> containers = ConcurrentCollections.newConcurrentMap();
-
- /** Request handlers for the requests made by the Azure client **/
-    private final PathTrie<RequestHandler> handlers;
-
- /** Server endpoint **/
- private final String endpoint;
-
- /** Increments for the requests ids **/
- private final AtomicLong requests = new AtomicLong(0);
-
- /**
- * Creates a {@link AzureStorageTestServer} with a custom endpoint
- */
- AzureStorageTestServer(final String endpoint) {
- this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
- this.handlers = defaultHandlers(endpoint, containers);
- }
-
- /** Creates a container in the test server **/
- void createContainer(final String containerName) {
- containers.put(containerName, new Container(containerName));
- }
-
- public String getEndpoint() {
- return endpoint;
- }
-
- /**
- * Returns a response for the given request
- *
- * @param method the HTTP method of the request
- * @param path the path of the URL of the request
- * @param query the queryString of the URL of request
- * @param headers the HTTP headers of the request
- * @param body the HTTP request body
- * @return a {@link Response}
- * @throws IOException if something goes wrong
- */
- public Response handle(final String method,
- final String path,
- final String query,
-                           final Map<String, List<String>> headers,
- byte[] body) throws IOException {
-
- final long requestId = requests.incrementAndGet();
-
- final Map params = new HashMap<>();
- if (query != null) {
- RestUtils.decodeQueryString(query, 0, params);
- }
-
- final RequestHandler handler = handlers.retrieve(method + " " + path, params);
- if (handler != null) {
- return handler.execute(params, headers, body, requestId);
- } else {
- return newInternalError(requestId);
- }
- }
-
- @FunctionalInterface
- interface RequestHandler {
-
- /**
- * Simulates the execution of a Azure Storage request and returns a corresponding response.
- *
- * @param params the request's query string parameters
- * @param headers the request's headers
- * @param body the request body provided as a byte array
- * @param requestId a unique id for the incoming request
- * @return the corresponding response
- *
- * @throws IOException if something goes wrong
- */
-        Response execute(Map<String, String> params, Map<String, List<String>> headers, byte[] body, long requestId) throws IOException;
- }
-
- /** Builds the default request handlers **/
-    private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, final Map<String, Container> containers) {
-        final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
-
- // Get Blob Properties
- //
- // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties
- objectsPaths("HEAD " + endpoint + "/{container}").forEach(path ->
- handlers.insert(path, (params, headers, body, requestId) -> {
- final String containerName = params.get("container");
-
- final Container container =containers.get(containerName);
- if (container == null) {
- return newContainerNotFoundError(requestId);
- }
-
- final String blobName = objectName(params);
- for (Map.Entry object : container.objects.entrySet()) {
- if (object.getKey().equals(blobName)) {
- Map responseHeaders = new HashMap<>();
- responseHeaders.put("x-ms-blob-content-length", String.valueOf(object.getValue().length));
- responseHeaders.put("x-ms-blob-type", "blockblob");
- return new Response(RestStatus.OK, responseHeaders, "text/plain", EMPTY_BYTE);
- }
- }
- return newBlobNotFoundError(requestId);
- })
- );
-
- // PUT Blob
- //
- // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob
- objectsPaths("PUT " + endpoint + "/{container}").forEach(path ->
- handlers.insert(path, (params, headers, body, requestId) -> {
- final String destContainerName = params.get("container");
- final String destBlobName = objectName(params);
-
- final Container destContainer =containers.get(destContainerName);
- if (destContainer == null) {
- return newContainerNotFoundError(requestId);
- }
-
- byte[] existingBytes = destContainer.objects.putIfAbsent(destBlobName, body);
- if (existingBytes != null) {
- return newBlobAlreadyExistsError(requestId);
- }
-
- return new Response(RestStatus.CREATED, emptyMap(), "text/plain", EMPTY_BYTE);
- })
- );
-
- // GET Object
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
- objectsPaths("GET " + endpoint + "/{container}").forEach(path ->
- handlers.insert(path, (params, headers, body, requestId) -> {
- final String containerName = params.get("container");
-
- final Container container =containers.get(containerName);
- if (container == null) {
- return newContainerNotFoundError(requestId);
- }
-
- final String blobName = objectName(params);
- if (container.objects.containsKey(blobName)) {
- Map responseHeaders = new HashMap<>();
- responseHeaders.put("x-ms-copy-status", "success");
- responseHeaders.put("x-ms-blob-type", "blockblob");
- return new Response(RestStatus.OK, responseHeaders, "application/octet-stream", container.objects.get(blobName));
-
- }
- return newBlobNotFoundError(requestId);
- })
- );
-
- // Delete Blob
- //
- // https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
- objectsPaths("DELETE " + endpoint + "/{container}").forEach(path ->
- handlers.insert(path, (params, headers, body, requestId) -> {
- final String containerName = params.get("container");
-
- final Container container =containers.get(containerName);
- if (container == null) {
- return newContainerNotFoundError(requestId);
- }
-
- final String blobName = objectName(params);
- if (container.objects.remove(blobName) != null) {
- return new Response(RestStatus.ACCEPTED, emptyMap(), "text/plain", EMPTY_BYTE);
- }
- return newBlobNotFoundError(requestId);
- })
- );
-
- // List Blobs
- //
- // https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs
- handlers.insert("GET " + endpoint + "/{container}/", (params, headers, body, requestId) -> {
- final String containerName = params.get("container");
-
- final Container container =containers.get(containerName);
- if (container == null) {
- return newContainerNotFoundError(requestId);
- }
-
- final String prefix = params.get("prefix");
- return newEnumerationResultsResponse(requestId, container, prefix);
- });
-
- // Get Container Properties
- //
- // https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
- handlers.insert("HEAD " + endpoint + "/{container}", (params, headers, body, requestId) -> {
- String container = params.get("container");
- if (Strings.hasText(container) && containers.containsKey(container)) {
- return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
- } else {
- return newContainerNotFoundError(requestId);
- }
- });
-
- return handlers;
- }
-
- /**
- * Represents a Azure Storage container.
- */
- static class Container {
-
- /** Container name **/
- final String name;
-
- /** Blobs contained in the container **/
-        final Map<String, byte[]> objects;
-
- Container(final String name) {
- this.name = Objects.requireNonNull(name);
- this.objects = ConcurrentCollections.newConcurrentMap();
- }
- }
-
- /**
- * Represents a HTTP Response.
- */
- static class Response {
-
- final RestStatus status;
-        final Map<String, String> headers;
- final String contentType;
- final byte[] body;
-
-        Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) {
- this.status = Objects.requireNonNull(status);
- this.headers = Objects.requireNonNull(headers);
- this.contentType = Objects.requireNonNull(contentType);
- this.body = Objects.requireNonNull(body);
- }
- }
-
- /**
- * Decline a path like "http://host:port/{bucket}" into 10 derived paths like:
- * - http://host:port/{bucket}/{path0}
- * - http://host:port/{bucket}/{path0}/{path1}
- * - http://host:port/{bucket}/{path0}/{path1}/{path2}
- * - etc
- */
-    private static List<String> objectsPaths(final String path) {
-        final List<String> paths = new ArrayList<>();
- String p = path;
- for (int i = 0; i < 10; i++) {
- p = p + "/{path" + i + "}";
- paths.add(p);
- }
- return paths;
- }
-
- /**
- * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
- *
- * This is the counterpart of {@link #objectsPaths(String)}
- */
-    private static String objectName(final Map<String, String> params) {
- final StringBuilder name = new StringBuilder();
- for (int i = 0; i < 10; i++) {
- String value = params.getOrDefault("path" + i, null);
- if (value != null) {
- if (name.length() > 0) {
- name.append('/');
- }
- name.append(value);
- }
- }
- return name.toString();
- }
-
-
- /**
- * Azure EnumerationResults Response
- */
- private static Response newEnumerationResultsResponse(final long requestId, final Container container, final String prefix) {
- final String id = Long.toString(requestId);
- final StringBuilder response = new StringBuilder();
- response.append("");
- response.append("");
- if (prefix != null) {
- response.append("").append(prefix).append("");
- } else {
- response.append("");
- }
- response.append("").append(container.objects.size()).append("");
- response.append("");
-
- int count = 0;
- for (Map.Entry object : container.objects.entrySet()) {
- String objectName = object.getKey();
- if (prefix == null || objectName.startsWith(prefix)) {
- response.append("");
- response.append("").append(objectName).append("");
- response.append("");
- response.append("").append(object.getValue().length).append("");
- response.append("").append(count++).append("");
- response.append("success");
- response.append("BlockBlob");
- response.append("");
- response.append("");
- }
- }
-
- response.append("");
- response.append("");
- response.append("");
-
- return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
- }
-
- private static Response newContainerNotFoundError(final long requestId) {
- return newError(requestId, RestStatus.NOT_FOUND, "ContainerNotFound", "The specified container does not exist");
- }
-
- private static Response newBlobNotFoundError(final long requestId) {
- return newError(requestId, RestStatus.NOT_FOUND, "BlobNotFound", "The specified blob does not exist");
- }
-
- private static Response newBlobAlreadyExistsError(final long requestId) {
- return newError(requestId, RestStatus.CONFLICT, "BlobAlreadyExists", "The specified blob already exists");
- }
-
- private static Response newInternalError(final long requestId) {
- return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "The server encountered an internal error");
- }
-
- /**
- * Azure Error
- *
- * https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
- */
- private static Response newError(final long requestId,
- final RestStatus status,
- final String code,
- final String message) {
-
- final StringBuilder response = new StringBuilder();
- response.append("");
- response.append("");
- response.append("").append(code).append("
");
- response.append("").append(message).append("");
- response.append("");
-
- final Map headers = new HashMap<>(2);
- headers.put("x-ms-request-id", String.valueOf(requestId));
- headers.put("x-ms-error-code", code);
-
- return new Response(status, headers, "application/xml", response.toString().getBytes(UTF_8));
- }
-}
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
index f5a0ed253c8e9..1821166c8845e 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
@@ -32,7 +32,6 @@
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URISyntaxException;
-import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
import java.util.Map;
diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle
index 34ec92a354277..0a610123a6fcc 100644
--- a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle
+++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle
@@ -26,20 +26,10 @@ import java.security.KeyPairGenerator
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
-dependencies {
- testCompile project(path: ':plugins:repository-gcs', configuration: 'runtime')
-}
-
integTestCluster {
plugin ':plugins:repository-gcs'
}
-forbiddenApisTest {
- // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
-}
-
boolean useFixture = false
String gcsServiceAccount = System.getenv("google_storage_service_account")
@@ -61,7 +51,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) {
/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/
task googleCloudStorageFixture(type: AntFixture) {
- dependsOn compileTestJava
+ dependsOn testClasses
env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
executable = new File(project.runtimeJavaHome, 'bin/java')
args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test'
diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java
index 6175e581e4fd0..b1a185c9c08c9 100644
--- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java
+++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java
@@ -18,133 +18,591 @@
*/
package org.elasticsearch.repositories.gcs;
-import com.sun.net.httpserver.HttpExchange;
-import com.sun.net.httpserver.HttpHandler;
-import com.sun.net.httpserver.HttpServer;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.core.internal.io.Streams;
-import org.elasticsearch.mocksocket.MockHttpServer;
-import org.elasticsearch.repositories.gcs.GoogleCloudStorageTestServer.Response;
+import org.elasticsearch.test.fixture.AbstractHttpFixture;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.SocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.zip.GZIPInputStream;
-import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Collections.emptyMap;
-import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
- * {@link GoogleCloudStorageFixture} is a fixture that emulates a Google Cloud Storage service.
- *
- * It starts an asynchronous socket server that binds to a random local port. The server parses
- * HTTP requests and uses a {@link GoogleCloudStorageTestServer} to handle them before returning
- * them to the client as HTTP responses.
+ * {@link GoogleCloudStorageFixture} emulates a Google Cloud Storage service.
+ *
+ * The implementation is based on official documentation available at https://cloud.google.com/storage/docs/json_api/v1/.
*/
-public class GoogleCloudStorageFixture {
+public class GoogleCloudStorageFixture extends AbstractHttpFixture {
+
+ /** List of the buckets stored on this test server **/
+    private final Map<String, Bucket> buckets = ConcurrentCollections.newConcurrentMap();
+
+ /** Request handlers for the requests made by the Google Cloud Storage client **/
+    private final PathTrie<RequestHandler> handlers;
+
+ /**
+ * Creates a {@link GoogleCloudStorageFixture}
+ */
+ private GoogleCloudStorageFixture(final String workingDir, final String bucket) {
+ super(workingDir);
+ this.buckets.put(bucket, new Bucket(bucket));
+ this.handlers = defaultHandlers(buckets);
+ }
- public static void main(String[] args) throws Exception {
+ @Override
+ protected Response handle(final Request request) throws IOException {
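+        // Look up and invoke the handler registered for the request's HTTP method and path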
+ final RequestHandler handler = handlers.retrieve(request.getMethod() + " " + request.getPath(), request.getParameters());
+ if (handler != null) {
+ return handler.handle(request);
+ }
+ return null;
+ }
+
+ public static void main(final String[] args) throws Exception {
if (args == null || args.length != 2) {
            throw new IllegalArgumentException("GoogleCloudStorageFixture <working directory> <bucket>");
}
- final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
- final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+ final GoogleCloudStorageFixture fixture = new GoogleCloudStorageFixture(args[0], args[1]);
+ fixture.listen();
+ }
- try {
- final Path workingDirectory = workingDir(args[0]);
- /// Writes the PID of the current Java process in a `pid` file located in the working directory
- writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+ /** Builds the default request handlers **/
+    private static PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> buckets) {
+        final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
- final String addressAndPort = addressToString(httpServer.getAddress());
- // Writes the address and port of the http server in a `ports` file located in the working directory
- writeFile(workingDirectory, "ports", addressAndPort);
+ // GET Bucket
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/buckets/get
+ handlers.insert("GET /storage/v1/b/{bucket}", (request) -> {
+ final String name = request.getParam("bucket");
+ if (Strings.hasText(name) == false) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing");
+ }
- // Emulates a Google Cloud Storage server
- final String storageUrl = "http://" + addressAndPort;
- final GoogleCloudStorageTestServer storageTestServer = new GoogleCloudStorageTestServer(storageUrl);
- storageTestServer.createBucket(args[1]);
+ if (buckets.containsKey(name)) {
+ return newResponse(RestStatus.OK, emptyMap(), buildBucketResource(name));
+ } else {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
+ });
- httpServer.createContext("/", new ResponseHandler(storageTestServer));
- httpServer.start();
+ // GET Object
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/objects/get
+ handlers.insert("GET /storage/v1/b/{bucket}/o/{object}", (request) -> {
+ final String objectName = request.getParam("object");
+ if (Strings.hasText(objectName) == false) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
+ }
- // Wait to be killed
- Thread.sleep(Long.MAX_VALUE);
+ final Bucket bucket = buckets.get(request.getParam("bucket"));
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
- } finally {
- httpServer.stop(0);
- }
- }
+            for (final Map.Entry<String, byte[]> object : bucket.objects.entrySet()) {
+ if (object.getKey().equals(objectName)) {
+ return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectName, object.getValue()));
+ }
+ }
+ return newError(RestStatus.NOT_FOUND, "object not found");
+ });
- @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
- private static Path workingDir(final String dir) {
- return Paths.get(dir);
- }
+ // Delete Object
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/objects/delete
+ handlers.insert("DELETE /storage/v1/b/{bucket}/o/{object}", (request) -> {
+ final String objectName = request.getParam("object");
+ if (Strings.hasText(objectName) == false) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
+ }
- private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
- final Path tempPidFile = Files.createTempFile(dir, null, null);
- Files.write(tempPidFile, singleton(content));
- Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
- }
+ final Bucket bucket = buckets.get(request.getParam("bucket"));
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
- private static String addressToString(final SocketAddress address) {
- final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
- if (inetSocketAddress.getAddress() instanceof Inet6Address) {
- return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
- } else {
- return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
- }
- }
+ final byte[] bytes = bucket.objects.remove(objectName);
+ if (bytes != null) {
+ return new Response(RestStatus.NO_CONTENT.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ }
+ return newError(RestStatus.NOT_FOUND, "object not found");
+ });
+
+ // Insert Object (initialization)
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/objects/insert
+ handlers.insert("POST /upload/storage/v1/b/{bucket}/o", (request) -> {
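+            // Supports the two upload types used by the client: "resumable" only initializes an upload whose data
+            // arrives in a later PUT request, while "multipart" carries metadata and data in one multipart/related body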
+ final String ifGenerationMatch = request.getParam("ifGenerationMatch");
+ if ("0".equals(ifGenerationMatch) == false) {
+ return newError(RestStatus.PRECONDITION_FAILED, "object already exist");
+ }
- static class ResponseHandler implements HttpHandler {
+ final String uploadType = request.getParam("uploadType");
+ if ("resumable".equals(uploadType)) {
+ final String objectName = request.getParam("name");
+ if (Strings.hasText(objectName) == false) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
+ }
+ final Bucket bucket = buckets.get(request.getParam("bucket"));
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
+ if (bucket.objects.putIfAbsent(objectName, EMPTY_BYTE) == null) {
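+                    // Reserve the object with an empty placeholder; its content is uploaded later through the returned Location URL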
+ final String location = /*endpoint +*/ "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id="
+ + objectName;
+ return newResponse(RestStatus.CREATED, singletonMap("Location", location), jsonBuilder());
+ } else {
+ return newError(RestStatus.CONFLICT, "object already exist");
+ }
+ } else if ("multipart".equals(uploadType)) {
+ /*
+ * A multipart/related request body looks like this (note the binary dump inside a text blob! nice!):
+ * --__END_OF_PART__
+ * Content-Length: 135
+ * Content-Type: application/json; charset=UTF-8
+ * content-transfer-encoding: binary
+ *
+ * {"bucket":"bucket_test","crc32c":"7XacHQ==","md5Hash":"fVztGkklMlUamsSmJK7W+w==",
+ * "name":"tests-KEwE3bU4TuyetBgQIghmUw/master.dat-temp"}
+ * --__END_OF_PART__
+ * content-transfer-encoding: binary
+ *
+ * KEwE3bU4TuyetBgQIghmUw
+ * --__END_OF_PART__--
+ */
+ String boundary = "__END_OF_PART__";
+ // Determine the multipart boundary
+ final String contentType = request.getContentType();
+ if ((contentType != null) && contentType.contains("multipart/related; boundary=")) {
+ boundary = contentType.replace("multipart/related; boundary=", "");
+ }
- private final GoogleCloudStorageTestServer storageServer;
+ InputStream inputStreamBody = new ByteArrayInputStream(request.getBody());
+ final String contentEncoding = request.getHeader("Content-Encoding");
+ if (contentEncoding != null) {
+ if ("gzip".equalsIgnoreCase(contentEncoding)) {
+ inputStreamBody = new GZIPInputStream(inputStreamBody);
+ }
+ }
+                // Read both parts of the multipart body line by line. Decoding the headers as
+                // ISO_8859_1 is safe.
+ try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStreamBody, StandardCharsets.ISO_8859_1))) {
+ String line;
+ // read first part delimiter
+ line = reader.readLine();
+ if ((line == null) || (line.equals("--" + boundary) == false)) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR,
+ "Error parsing multipart request. Does not start with the part delimiter.");
+ }
+                    final Map<String, List<String>> firstPartHeaders = new HashMap<>();
+ // Reads the first part's headers, if any
+ while ((line = reader.readLine()) != null) {
+ if (line.equals("\r\n") || (line.length() == 0)) {
+ // end of headers
+ break;
+ } else {
+ final String[] header = line.split(":", 2);
+ firstPartHeaders.put(header[0], singletonList(header[1]));
+ }
+ }
+                    final List<String> firstPartContentTypes = firstPartHeaders.getOrDefault("Content-Type",
+ firstPartHeaders.get("Content-type"));
+ if ((firstPartContentTypes == null)
+ || (firstPartContentTypes.stream().noneMatch(x -> x.contains("application/json")))) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR,
+ "Error parsing multipart request. Metadata part expected to have the \"application/json\" content type.");
+ }
+ // read metadata part, a single line
+ line = reader.readLine();
+ final byte[] metadata = line.getBytes(StandardCharsets.ISO_8859_1);
+ if ((firstPartContentTypes != null) && (firstPartContentTypes.stream().anyMatch((x -> x.contains("charset=utf-8"))))) {
+ // decode as utf-8
+ line = new String(metadata, StandardCharsets.UTF_8);
+ }
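+                    // Extract the object and bucket names from the JSON metadata with simple regexes rather than a JSON parser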
+ final Matcher objectNameMatcher = Pattern.compile("\"name\":\"([^\"]*)\"").matcher(line);
+ objectNameMatcher.find();
+ final String objectName = objectNameMatcher.group(1);
+ final Matcher bucketNameMatcher = Pattern.compile("\"bucket\":\"([^\"]*)\"").matcher(line);
+ bucketNameMatcher.find();
+ final String bucketName = bucketNameMatcher.group(1);
+ // read second part delimiter
+ line = reader.readLine();
+ if ((line == null) || (line.equals("--" + boundary) == false)) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR,
+ "Error parsing multipart request. Second part does not start with delimiter. "
+ + "Is the metadata multi-line?");
+ }
+                    final Map<String, List<String>> secondPartHeaders = new HashMap<>();
+ // Reads the second part's headers, if any
+ while ((line = reader.readLine()) != null) {
+ if (line.equals("\r\n") || (line.length() == 0)) {
+ // end of headers
+ break;
+ } else {
+ final String[] header = line.split(":", 2);
+ secondPartHeaders.put(header[0], singletonList(header[1]));
+ }
+ }
+                    final List<String> secondPartTransferEncoding = secondPartHeaders.getOrDefault("Content-Transfer-Encoding",
+ secondPartHeaders.get("content-transfer-encoding"));
+ if ((secondPartTransferEncoding == null)
+ || (secondPartTransferEncoding.stream().noneMatch(x -> x.contains("binary")))) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR,
+ "Error parsing multipart request. Data part expected to have the \"binary\" content transfer encoding.");
+ }
+ final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ int c;
+ while ((c = reader.read()) != -1) {
+ // one char to one byte, because of the ISO_8859_1 encoding
+ baos.write(c);
+ }
+ final byte[] temp = baos.toByteArray();
+ final byte[] trailingEnding = ("\r\n--" + boundary + "--\r\n").getBytes(StandardCharsets.ISO_8859_1);
+ // check trailing
+ for (int i = trailingEnding.length - 1; i >= 0; i--) {
+ if (trailingEnding[i] != temp[(temp.length - trailingEnding.length) + i]) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "Error parsing multipart request.");
+ }
+ }
+ final Bucket bucket = buckets.get(bucketName);
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
+ final byte[] objectData = Arrays.copyOf(temp, temp.length - trailingEnding.length);
+ if ((objectName != null) && (bucketName != null) && (objectData != null)) {
+ bucket.objects.put(objectName, objectData);
+ return new Response(RestStatus.OK.getStatus(), JSON_CONTENT_TYPE, metadata);
+ } else {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "error parsing multipart request");
+ }
+ }
+ } else {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable or multipart");
+ }
+ });
- private ResponseHandler(final GoogleCloudStorageTestServer storageServer) {
- this.storageServer = storageServer;
- }
+ // Insert Object (upload)
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
+ handlers.insert("PUT /upload/storage/v1/b/{bucket}/o", (request) -> {
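+            // Completes a resumable upload: the upload_id issued at initialization time is the name of the object to fill in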
+ final String objectId = request.getParam("upload_id");
+ if (Strings.hasText(objectId) == false) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing");
+ }
- @Override
- public void handle(HttpExchange exchange) throws IOException {
- String method = exchange.getRequestMethod();
- String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath();
- String query = exchange.getRequestURI().getRawQuery();
- Map> headers = exchange.getRequestHeaders();
+ final Bucket bucket = buckets.get(request.getParam("bucket"));
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- Streams.copy(exchange.getRequestBody(), out);
+ if (bucket.objects.containsKey(objectId) == false) {
+ return newError(RestStatus.NOT_FOUND, "object name not found");
+ }
- Response storageResponse = null;
+ bucket.objects.put(objectId, request.getBody());
+ return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectId, request.getBody()));
+ });
- final String userAgent = exchange.getRequestHeaders().getFirst("User-Agent");
- if (userAgent != null && userAgent.startsWith("Apache Ant")) {
- // This is a request made by the AntFixture, just reply "OK"
- storageResponse = new Response(RestStatus.OK, emptyMap(), "text/plain; charset=utf-8", "OK".getBytes(UTF_8));
- } else {
- // Otherwise simulate a S3 response
- storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray());
+ // List Objects
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/objects/list
+ handlers.insert("GET /storage/v1/b/{bucket}/o", (request) -> {
+ final Bucket bucket = buckets.get(request.getParam("bucket"));
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
+ }
+
+ final XContentBuilder builder = jsonBuilder();
+ builder.startObject();
+ builder.field("kind", "storage#objects");
+ {
+ builder.startArray("items");
+
+ final String prefixParam = request.getParam("prefix");
+                for (final Map.Entry<String, byte[]> object : bucket.objects.entrySet()) {
+ if ((prefixParam != null) && (object.getKey().startsWith(prefixParam) == false)) {
+ continue;
+ }
+ buildObjectResource(builder, bucket.name, object.getKey(), object.getValue());
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+ return newResponse(RestStatus.OK, emptyMap(), builder);
+ });
+
+ // Download Object
+ //
+ // https://cloud.google.com/storage/docs/request-body
+ handlers.insert("GET /download/storage/v1/b/{bucket}/o/{object}", (request) -> {
+ final String object = request.getParam("object");
+ if (Strings.hasText(object) == false) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing");
+ }
+
+ final Bucket bucket = buckets.get(request.getParam("bucket"));
+ if (bucket == null) {
+ return newError(RestStatus.NOT_FOUND, "bucket not found");
}
- Map> responseHeaders = exchange.getResponseHeaders();
- responseHeaders.put("Content-Type", singletonList(storageResponse.contentType));
- storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
- exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length);
- if (storageResponse.body.length > 0) {
- exchange.getResponseBody().write(storageResponse.body);
+ if (bucket.objects.containsKey(object) == false) {
+ return newError(RestStatus.NOT_FOUND, "object name not found");
}
- exchange.close();
+
+ return new Response(RestStatus.OK.getStatus(), contentType("application/octet-stream"), bucket.objects.get(object));
+ });
+
+ // Batch
+ //
+ // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
+ handlers.insert("POST /batch/storage/v1", (request) -> {
+            final List<Response> batchedResponses = new ArrayList<>();
+
+ // A batch request body looks like this:
+ //
+ // --__END_OF_PART__
+ // Content-Length: 71
+ // Content-Type: application/http
+ // content-id: 1
+ // content-transfer-encoding: binary
+ //
+ // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/foo%2Ftest HTTP/1.1
+ //
+ //
+ // --__END_OF_PART__
+ // Content-Length: 71
+ // Content-Type: application/http
+ // content-id: 2
+ // content-transfer-encoding: binary
+ //
+ // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/bar%2Ftest HTTP/1.1
+ //
+ //
+ // --__END_OF_PART__--
+
+ // Default multipart boundary
+ String boundary = "__END_OF_PART__";
+
+ // Determine the multipart boundary
+ final String contentType = request.getContentType();
+ if ((contentType != null) && contentType.contains("multipart/mixed; boundary=")) {
+ boundary = contentType.replace("multipart/mixed; boundary=", "");
+ }
+
+ long batchedRequests = 0L;
+
+ // Read line by line the batched requests
+ try (BufferedReader reader = new BufferedReader(
+ new InputStreamReader(
+ new ByteArrayInputStream(request.getBody()), StandardCharsets.UTF_8))) {
+ String line;
+ while ((line = reader.readLine()) != null) {
+ // Start of a batched request
+ if (line.equals("--" + boundary)) {
+                        final Map<String, String> batchedHeaders = new HashMap<>();
+
+ // Reads the headers, if any
+ while ((line = reader.readLine()) != null) {
+ if (line.equals("\r\n") || (line.length() == 0)) {
+ // end of headers
+ break;
+ } else {
+ final String[] header = line.split(":", 2);
+ batchedHeaders.put(header[0], header[1]);
+ }
+ }
+
+ // Reads the method and URL
+ line = reader.readLine();
+ final String batchedMethod = line.substring(0, line.indexOf(' '));
+ final URI batchedUri = URI.create(line.substring(batchedMethod.length() + 1, line.lastIndexOf(' ')));
+
+ // Reads the body
+ line = reader.readLine();
+ byte[] batchedBody = new byte[0];
+                        if ((line != null) && (line.startsWith("--" + boundary) == false)) {
+ batchedBody = line.getBytes(StandardCharsets.UTF_8);
+ }
+
+ final Request batchedRequest = new Request(batchedRequests, batchedMethod, batchedUri, batchedHeaders, batchedBody);
+ batchedRequests = batchedRequests + 1;
+
+ // Executes the batched request
+ final RequestHandler handler =
+ handlers.retrieve(batchedRequest.getMethod() + " " + batchedRequest.getPath(), batchedRequest.getParameters());
+ if (handler != null) {
+ try {
+ batchedResponses.add(handler.handle(batchedRequest));
+ } catch (final IOException e) {
+ batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()));
+ }
+ }
+ }
+ }
+ }
+
+ // Now we can build the response
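+            // Each batched response is written as a serialized HTTP message inside its own part of a multipart/mixed payload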
+ final String sep = "--";
+ final String line = "\r\n";
+
+ final StringBuilder builder = new StringBuilder();
+ for (final Response response : batchedResponses) {
+ builder.append(sep).append(boundary).append(line);
+ builder.append("Content-Type: application/http").append(line);
+ builder.append(line);
+ builder.append("HTTP/1.1 ")
+ .append(response.getStatus())
+ .append(' ')
+ .append(RestStatus.fromCode(response.getStatus()).toString())
+ .append(line);
+ builder.append("Content-Length: ").append(response.getBody().length).append(line);
+ builder.append("Content-Type: ").append(response.getContentType()).append(line);
+ response.getHeaders().forEach((k, v) -> builder.append(k).append(": ").append(v).append(line));
+ builder.append(line);
+ builder.append(new String(response.getBody(), StandardCharsets.UTF_8)).append(line);
+ builder.append(line);
+ }
+ builder.append(line);
+ builder.append(sep).append(boundary).append(sep);
+
+ final byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8);
+ return new Response(RestStatus.OK.getStatus(), contentType("multipart/mixed; boundary=" + boundary), content);
+ });
+
+ // Fake refresh of an OAuth2 token
+ //
+ handlers.insert("POST /o/oauth2/token", (request) ->
+ newResponse(RestStatus.OK, emptyMap(), jsonBuilder()
+ .startObject()
+ .field("access_token", "unknown")
+ .field("token_type", "Bearer")
+ .field("expires_in", 3600)
+ .endObject())
+ );
+
+ return handlers;
+ }
+
+ /**
+ * Represents a Storage bucket as if it was created on Google Cloud Storage.
+ */
+ static class Bucket {
+
+ /** Bucket name **/
+ final String name;
+
+ /** Blobs contained in the bucket **/
+        final Map<String, byte[]> objects;
+
+ Bucket(final String name) {
+ this.name = Objects.requireNonNull(name);
+ this.objects = ConcurrentCollections.newConcurrentMap();
+ }
+ }
+
+ /**
+ * Builds a JSON response
+ */
+    private static Response newResponse(final RestStatus status, final Map<String, String> headers, final XContentBuilder xContentBuilder) {
+        final Map<String, String> responseHeaders = new HashMap<>(JSON_CONTENT_TYPE);
+ responseHeaders.putAll(headers);
+
+ try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
+ BytesReference.bytes(xContentBuilder).writeTo(out);
+
+ return new Response(status.getStatus(), responseHeaders, out.toByteArray());
+ } catch (final IOException e) {
+ return newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage());
}
}
+
+ /**
+ * Storage Error JSON representation
+ */
+ private static Response newError(final RestStatus status, final String message) {
+ try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
+ try (XContentBuilder builder = jsonBuilder()) {
+ builder.startObject()
+ .startObject("error")
+ .field("code", status.getStatus())
+ .field("message", message)
+ .startArray("errors")
+ .startObject()
+ .field("domain", "global")
+ .field("reason", status.toString())
+ .field("message", message)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject();
+ BytesReference.bytes(builder).writeTo(out);
+ }
+ return new Response(status.getStatus(), JSON_CONTENT_TYPE, out.toByteArray());
+ } catch (final IOException e) {
+ final byte[] bytes = (message != null ? message : "something went wrong").getBytes(StandardCharsets.UTF_8);
+ return new Response(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), TEXT_PLAIN_CONTENT_TYPE, bytes);
+ }
+ }
+
+ /**
+ * Storage Bucket JSON representation as defined in
+ * https://cloud.google.com/storage/docs/json_api/v1/bucket#resource
+ */
+ private static XContentBuilder buildBucketResource(final String name) throws IOException {
+ return jsonBuilder().startObject()
+ .field("kind", "storage#bucket")
+ .field("name", name)
+ .field("id", name)
+ .endObject();
+ }
+
+ /**
+ * Storage Object JSON representation as defined in
+ * https://cloud.google.com/storage/docs/json_api/v1/objects#resource
+ */
+ private static XContentBuilder buildObjectResource(final String bucket, final String name, final byte[] bytes) throws IOException {
+ return buildObjectResource(jsonBuilder(), bucket, name, bytes);
+ }
+
+ /**
+ * Storage Object JSON representation as defined in
+ * https://cloud.google.com/storage/docs/json_api/v1/objects#resource
+ */
+ private static XContentBuilder buildObjectResource(final XContentBuilder builder,
+ final String bucket,
+ final String name,
+ final byte[] bytes) throws IOException {
+ return builder.startObject()
+ .field("kind", "storage#object")
+ .field("id", String.join("/", bucket, name))
+ .field("name", name)
+ .field("bucket", bucket)
+ .field("size", String.valueOf(bytes.length))
+ .endObject();
+ }
}
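
Each handler above is registered in a PathTrie keyed by "METHOD path-template" and is looked up with the request's decoded path parameters. The following is a minimal, self-contained sketch of that dispatch mechanism only; the Handler interface and the concrete paths are illustrative and not part of the fixture itself.

import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.rest.RestUtils;

import java.util.HashMap;
import java.util.Map;

public class PathTrieDispatchSketch {

    // Illustrative stand-in for the fixture's RequestHandler
    @FunctionalInterface
    interface Handler {
        String handle(Map<String, String> params);
    }

    public static void main(String[] args) {
        final PathTrie<Handler> handlers = new PathTrie<>(RestUtils.REST_DECODER);

        // Path templates use {name} placeholders, exactly like the fixture above
        handlers.insert("GET /storage/v1/b/{bucket}/o/{object}",
            params -> "object [" + params.get("object") + "] in bucket [" + params.get("bucket") + "]");

        // At dispatch time the concrete path is matched and the placeholders are
        // decoded into the params map that retrieve() fills in
        final Map<String, String> params = new HashMap<>();
        final Handler handler = handlers.retrieve("GET /storage/v1/b/test/o/master.dat", params);
        System.out.println(handler.handle(params)); // prints: object [master.dat] in bucket [test]
    }
}
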
diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
deleted file mode 100644
index fd09b46c73fc1..0000000000000
--- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
+++ /dev/null
@@ -1,663 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.repositories.gcs;
-
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.path.PathTrie;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.rest.RestUtils;
-
-import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.zip.GZIPInputStream;
-
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singletonList;
-import static java.util.Collections.singletonMap;
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-
-/**
- * {@link GoogleCloudStorageTestServer} emulates a Google Cloud Storage service through
- * a {@link #handle(String, String, String, Map, byte[])} method that provides appropriate
- * responses for specific requests like the real Google Cloud platform would do.
- * It is largely based on official documentation available at https://cloud.google.com/storage/docs/json_api/v1/.
- */
-public class GoogleCloudStorageTestServer {
-
- private static final byte[] EMPTY_BYTE = new byte[0];
-
- /** List of the buckets stored on this test server **/
- private final Map buckets = ConcurrentCollections.newConcurrentMap();
-
- /** Request handlers for the requests made by the Google Cloud Storage client **/
- private final PathTrie handlers;
-
- /** Server endpoint **/
- private final String endpoint;
-
- /**
- * Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint
- */
- GoogleCloudStorageTestServer(final String endpoint) {
- this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
- this.handlers = defaultHandlers(endpoint, buckets);
- }
-
- /** Creates a bucket in the test server **/
- void createBucket(final String bucketName) {
- buckets.put(bucketName, new Bucket(bucketName));
- }
-
- public String getEndpoint() {
- return endpoint;
- }
-
- /**
- * Returns a Google Cloud Storage response for the given request
- *
- * @param method the HTTP method of the request
- * @param path the path of the URL of the request
- * @param query the queryString of the URL of request
- * @param headers the HTTP headers of the request
- * @param body the HTTP request body
- * @return a {@link Response}
- * @throws IOException if something goes wrong
- */
- public Response handle(final String method,
- final String path,
- final String query,
- final Map> headers,
- byte[] body) throws IOException {
-
- final Map params = new HashMap<>();
- if (query != null) {
- RestUtils.decodeQueryString(query, 0, params);
- }
-
- final RequestHandler handler = handlers.retrieve(method + " " + path, params);
- if (handler != null) {
- return handler.execute(params, headers, body);
- } else {
- return newError(RestStatus.INTERNAL_SERVER_ERROR,
- "No handler defined for request [method: " + method + ", path: " + path + "]");
- }
- }
-
- @FunctionalInterface
- interface RequestHandler {
-
- /**
- * Simulates the execution of a Storage request and returns a corresponding response.
- *
- * @param params the request's query string parameters
- * @param headers the request's headers
- * @param body the request body provided as a byte array
- * @return the corresponding response
- *
- * @throws IOException if something goes wrong
- */
- Response execute(Map params, Map> headers, byte[] body) throws IOException;
- }
-
- /** Builds the default request handlers **/
- private static PathTrie defaultHandlers(final String endpoint, final Map buckets) {
- final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER);
-
- // GET Bucket
- //
- // https://cloud.google.com/storage/docs/json_api/v1/buckets/get
- handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}", (params, headers, body) -> {
- final String name = params.get("bucket");
- if (Strings.hasText(name) == false) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing");
- }
-
- if (buckets.containsKey(name)) {
- return newResponse(RestStatus.OK, emptyMap(), buildBucketResource(name));
- } else {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
- });
-
- // GET Object
- //
- // https://cloud.google.com/storage/docs/json_api/v1/objects/get
- handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
- final String objectName = params.get("object");
- if (Strings.hasText(objectName) == false) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
- }
-
- final Bucket bucket = buckets.get(params.get("bucket"));
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
-
- for (final Map.Entry object : bucket.objects.entrySet()) {
- if (object.getKey().equals(objectName)) {
- return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectName, object.getValue()));
- }
- }
- return newError(RestStatus.NOT_FOUND, "object not found");
- });
-
- // Delete Object
- //
- // https://cloud.google.com/storage/docs/json_api/v1/objects/delete
- handlers.insert("DELETE " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
- final String objectName = params.get("object");
- if (Strings.hasText(objectName) == false) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
- }
-
- final Bucket bucket = buckets.get(params.get("bucket"));
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
-
- final byte[] bytes = bucket.objects.remove(objectName);
- if (bytes != null) {
- return new Response(RestStatus.NO_CONTENT, emptyMap(), XContentType.JSON.mediaType(), EMPTY_BYTE);
- }
- return newError(RestStatus.NOT_FOUND, "object not found");
- });
-
- // Insert Object (initialization)
- //
- // https://cloud.google.com/storage/docs/json_api/v1/objects/insert
- handlers.insert("POST " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
- final String uploadType = params.get("uploadType");
- if ("resumable".equals(uploadType)) {
- final String objectName = params.get("name");
- if (Strings.hasText(objectName) == false) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
- }
- final Bucket bucket = buckets.get(params.get("bucket"));
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
- if (bucket.objects.putIfAbsent(objectName, EMPTY_BYTE) == null) {
- final String location = endpoint + "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id="
- + objectName;
- return new Response(RestStatus.CREATED, singletonMap("Location", location), XContentType.JSON.mediaType(), EMPTY_BYTE);
- } else {
- return newError(RestStatus.CONFLICT, "object already exist");
- }
- } else if ("multipart".equals(uploadType)) {
- /*
- * A multipart/related request body looks like this (note the binary dump inside a text blob! nice!):
- * --__END_OF_PART__
- * Content-Length: 135
- * Content-Type: application/json; charset=UTF-8
- * content-transfer-encoding: binary
- *
- * {"bucket":"bucket_test","crc32c":"7XacHQ==","md5Hash":"fVztGkklMlUamsSmJK7W+w==",
- * "name":"tests-KEwE3bU4TuyetBgQIghmUw/master.dat-temp"}
- * --__END_OF_PART__
- * content-transfer-encoding: binary
- *
- * KEwE3bU4TuyetBgQIghmUw
- * --__END_OF_PART__--
- */
- String boundary = "__END_OF_PART__";
- // Determine the multipart boundary
- final List contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type"));
- if (contentTypes != null) {
- final String contentType = contentTypes.get(0);
- if ((contentType != null) && contentType.contains("multipart/related; boundary=")) {
- boundary = contentType.replace("multipart/related; boundary=", "");
- }
- }
- InputStream inputStreamBody = new ByteArrayInputStream(body);
- final List contentEncodings = headers.getOrDefault("Content-Encoding", headers.get("Content-encoding"));
- if (contentEncodings != null) {
- if (contentEncodings.stream().anyMatch(x -> "gzip".equalsIgnoreCase(x))) {
- inputStreamBody = new GZIPInputStream(inputStreamBody);
- }
- }
-                    // Read line by line both parts of the multipart. Decoding headers as
-                    // ISO_8859_1 is safe.
- try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStreamBody, StandardCharsets.ISO_8859_1))) {
- String line;
- // read first part delimiter
- line = reader.readLine();
- if ((line == null) || (line.equals("--" + boundary) == false)) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR,
- "Error parsing multipart request. Does not start with the part delimiter.");
- }
- final Map> firstPartHeaders = new HashMap<>();
- // Reads the first part's headers, if any
- while ((line = reader.readLine()) != null) {
- if (line.equals("\r\n") || (line.length() == 0)) {
- // end of headers
- break;
- } else {
- final String[] header = line.split(":", 2);
- firstPartHeaders.put(header[0], singletonList(header[1]));
- }
- }
- final List firstPartContentTypes = firstPartHeaders.getOrDefault("Content-Type",
- firstPartHeaders.get("Content-type"));
- if ((firstPartContentTypes == null)
- || (firstPartContentTypes.stream().noneMatch(x -> x.contains("application/json")))) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR,
- "Error parsing multipart request. Metadata part expected to have the \"application/json\" content type.");
- }
- // read metadata part, a single line
- line = reader.readLine();
- final byte[] metadata = line.getBytes(StandardCharsets.ISO_8859_1);
- if ((firstPartContentTypes != null) && (firstPartContentTypes.stream().anyMatch((x -> x.contains("charset=utf-8"))))) {
- // decode as utf-8
- line = new String(metadata, StandardCharsets.UTF_8);
- }
- final Matcher objectNameMatcher = Pattern.compile("\"name\":\"([^\"]*)\"").matcher(line);
- objectNameMatcher.find();
- final String objectName = objectNameMatcher.group(1);
- final Matcher bucketNameMatcher = Pattern.compile("\"bucket\":\"([^\"]*)\"").matcher(line);
- bucketNameMatcher.find();
- final String bucketName = bucketNameMatcher.group(1);
- // read second part delimiter
- line = reader.readLine();
- if ((line == null) || (line.equals("--" + boundary) == false)) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR,
- "Error parsing multipart request. Second part does not start with delimiter. "
- + "Is the metadata multi-line?");
- }
- final Map> secondPartHeaders = new HashMap<>();
- // Reads the second part's headers, if any
- while ((line = reader.readLine()) != null) {
- if (line.equals("\r\n") || (line.length() == 0)) {
- // end of headers
- break;
- } else {
- final String[] header = line.split(":", 2);
- secondPartHeaders.put(header[0], singletonList(header[1]));
- }
- }
- final List secondPartTransferEncoding = secondPartHeaders.getOrDefault("Content-Transfer-Encoding",
- secondPartHeaders.get("content-transfer-encoding"));
- if ((secondPartTransferEncoding == null)
- || (secondPartTransferEncoding.stream().noneMatch(x -> x.contains("binary")))) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR,
- "Error parsing multipart request. Data part expected to have the \"binary\" content transfer encoding.");
- }
- final ByteArrayOutputStream baos = new ByteArrayOutputStream();
- int c;
- while ((c = reader.read()) != -1) {
- // one char to one byte, because of the ISO_8859_1 encoding
- baos.write(c);
- }
- final byte[] temp = baos.toByteArray();
- final byte[] trailingEnding = ("\r\n--" + boundary + "--\r\n").getBytes(StandardCharsets.ISO_8859_1);
- // check trailing
- for (int i = trailingEnding.length - 1; i >= 0; i--) {
- if (trailingEnding[i] != temp[(temp.length - trailingEnding.length) + i]) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "Error parsing multipart request.");
- }
- }
- final Bucket bucket = buckets.get(bucketName);
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
- final byte[] objectData = Arrays.copyOf(temp, temp.length - trailingEnding.length);
- if ((objectName != null) && (bucketName != null) && (objectData != null)) {
- bucket.objects.put(objectName, objectData);
- return new Response(RestStatus.OK, emptyMap(), XContentType.JSON.mediaType(), metadata);
- } else {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "error parsing multipart request");
- }
- }
- } else {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable or multipart");
- }
- });
-
- // Insert Object (upload)
- //
- // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
- handlers.insert("PUT " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
- final String objectId = params.get("upload_id");
- if (Strings.hasText(objectId) == false) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing");
- }
-
- final Bucket bucket = buckets.get(params.get("bucket"));
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
-
- if (bucket.objects.containsKey(objectId) == false) {
- return newError(RestStatus.NOT_FOUND, "object name not found");
- }
-
- bucket.objects.put(objectId, body);
- return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectId, body));
- });
-
- // List Objects
- //
- // https://cloud.google.com/storage/docs/json_api/v1/objects/list
- handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o", (params, headers, body) -> {
- final Bucket bucket = buckets.get(params.get("bucket"));
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
-
- final XContentBuilder builder = jsonBuilder();
- builder.startObject();
- builder.field("kind", "storage#objects");
- {
- builder.startArray("items");
-
- final String prefixParam = params.get("prefix");
- for (final Map.Entry object : bucket.objects.entrySet()) {
- if ((prefixParam != null) && (object.getKey().startsWith(prefixParam) == false)) {
- continue;
- }
- buildObjectResource(builder, bucket.name, object.getKey(), object.getValue());
- }
- builder.endArray();
- }
- builder.endObject();
- return newResponse(RestStatus.OK, emptyMap(), builder);
- });
-
- // Download Object
- //
- // https://cloud.google.com/storage/docs/request-body
- handlers.insert("GET " + endpoint + "/download/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
- final String object = params.get("object");
- if (Strings.hasText(object) == false) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing");
- }
-
- final Bucket bucket = buckets.get(params.get("bucket"));
- if (bucket == null) {
- return newError(RestStatus.NOT_FOUND, "bucket not found");
- }
-
- if (bucket.objects.containsKey(object) == false) {
- return newError(RestStatus.NOT_FOUND, "object name not found");
- }
-
- return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(object));
- });
-
- // Batch
- //
- // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
- handlers.insert("POST " + endpoint + "/batch/storage/v1", (params, headers, body) -> {
- final List batchedResponses = new ArrayList<>();
-
- // A batch request body looks like this:
- //
- // --__END_OF_PART__
- // Content-Length: 71
- // Content-Type: application/http
- // content-id: 1
- // content-transfer-encoding: binary
- //
- // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/foo%2Ftest HTTP/1.1
- //
- //
- // --__END_OF_PART__
- // Content-Length: 71
- // Content-Type: application/http
- // content-id: 2
- // content-transfer-encoding: binary
- //
- // DELETE https://www.googleapis.com/storage/v1/b/ohifkgu/o/bar%2Ftest HTTP/1.1
- //
- //
- // --__END_OF_PART__--
-
- // Default multipart boundary
- String boundary = "__END_OF_PART__";
-
- // Determine the multipart boundary
- final List contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type"));
- if (contentTypes != null) {
- final String contentType = contentTypes.get(0);
- if ((contentType != null) && contentType.contains("multipart/mixed; boundary=")) {
- boundary = contentType.replace("multipart/mixed; boundary=", "");
- }
- }
-
- // Read line by line the batched requests
- try (BufferedReader reader = new BufferedReader(
- new InputStreamReader(
- new ByteArrayInputStream(body), StandardCharsets.UTF_8))) {
- String line;
- while ((line = reader.readLine()) != null) {
- // Start of a batched request
- if (line.equals("--" + boundary)) {
- final Map> batchedHeaders = new HashMap<>();
-
- // Reads the headers, if any
- while ((line = reader.readLine()) != null) {
- if (line.equals("\r\n") || (line.length() == 0)) {
- // end of headers
- break;
- } else {
- final String[] header = line.split(":", 2);
- batchedHeaders.put(header[0], singletonList(header[1]));
- }
- }
-
- // Reads the method and URL
- line = reader.readLine();
- final String batchedUrl = line.substring(0, line.lastIndexOf(' '));
-
- final Map batchedParams = new HashMap<>();
- final int questionMark = batchedUrl.indexOf('?');
- if (questionMark != -1) {
- RestUtils.decodeQueryString(batchedUrl.substring(questionMark + 1), 0, batchedParams);
- }
-
- // Reads the body
- line = reader.readLine();
- byte[] batchedBody = new byte[0];
- if ((line != null) || (line.startsWith("--" + boundary) == false)) {
- batchedBody = line.getBytes(StandardCharsets.UTF_8);
- }
-
- // Executes the batched request
- final RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams);
- if (handler != null) {
- try {
- batchedResponses.add(handler.execute(batchedParams, batchedHeaders, batchedBody));
- } catch (final IOException e) {
- batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()));
- }
- }
- }
- }
- }
-
- // Now we can build the response
- final String sep = "--";
- final String line = "\r\n";
-
- final StringBuilder builder = new StringBuilder();
- for (final Response response : batchedResponses) {
- builder.append(sep).append(boundary).append(line);
- builder.append("Content-Type: application/http").append(line);
- builder.append(line);
- builder.append("HTTP/1.1 ")
- .append(response.status.getStatus())
- .append(' ')
- .append(response.status.toString())
- .append(line);
- builder.append("Content-Length: ").append(response.body.length).append(line);
- builder.append("Content-Type: ").append(response.contentType).append(line);
- response.headers.forEach((k, v) -> builder.append(k).append(": ").append(v).append(line));
- builder.append(line);
- builder.append(new String(response.body, StandardCharsets.UTF_8)).append(line);
- builder.append(line);
- }
- builder.append(line);
- builder.append(sep).append(boundary).append(sep);
-
- final byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8);
- return new Response(RestStatus.OK, emptyMap(), "multipart/mixed; boundary=" + boundary, content);
- });
-
- // Fake refresh of an OAuth2 token
- //
- handlers.insert("POST " + endpoint + "/o/oauth2/token", (url, params, req) ->
- newResponse(RestStatus.OK, emptyMap(), jsonBuilder()
- .startObject()
- .field("access_token", "unknown")
- .field("token_type", "Bearer")
- .field("expires_in", 3600)
- .endObject())
- );
-
- return handlers;
- }
-
- /**
- * Represents a Storage bucket as if it was created on Google Cloud Storage.
- */
- static class Bucket {
-
- /** Bucket name **/
- final String name;
-
- /** Blobs contained in the bucket **/
- final Map objects;
-
- Bucket(final String name) {
- this.name = Objects.requireNonNull(name);
- this.objects = ConcurrentCollections.newConcurrentMap();
- }
- }
-
- /**
- * Represents a Storage HTTP Response.
- */
- static class Response {
-
- final RestStatus status;
- final Map headers;
- final String contentType;
- final byte[] body;
-
- Response(final RestStatus status, final Map headers, final String contentType, final byte[] body) {
- this.status = Objects.requireNonNull(status);
- this.headers = Objects.requireNonNull(headers);
- this.contentType = Objects.requireNonNull(contentType);
- this.body = Objects.requireNonNull(body);
- }
- }
-
- /**
- * Builds a JSON response
- */
- private static Response newResponse(final RestStatus status, final Map headers, final XContentBuilder xContentBuilder) {
- try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
- BytesReference.bytes(xContentBuilder).writeTo(out);
- return new Response(status, headers, XContentType.JSON.mediaType(), out.toByteArray());
- } catch (final IOException e) {
- return newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage());
- }
- }
-
- /**
- * Storage Error JSON representation
- */
- private static Response newError(final RestStatus status, final String message) {
- try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
- try (XContentBuilder builder = jsonBuilder()) {
- builder.startObject()
- .startObject("error")
- .field("code", status.getStatus())
- .field("message", message)
- .startArray("errors")
- .startObject()
- .field("domain", "global")
- .field("reason", status.toString())
- .field("message", message)
- .endObject()
- .endArray()
- .endObject()
- .endObject();
- BytesReference.bytes(builder).writeTo(out);
- }
- return new Response(status, emptyMap(), XContentType.JSON.mediaType(), out.toByteArray());
- } catch (final IOException e) {
- final byte[] bytes = (message != null ? message : "something went wrong").getBytes(StandardCharsets.UTF_8);
- return new Response(RestStatus.INTERNAL_SERVER_ERROR, emptyMap(), " text/plain", bytes);
- }
- }
-
- /**
- * Storage Bucket JSON representation as defined in
- * https://cloud.google.com/storage/docs/json_api/v1/bucket#resource
- */
- private static XContentBuilder buildBucketResource(final String name) throws IOException {
- return jsonBuilder().startObject()
- .field("kind", "storage#bucket")
- .field("name", name)
- .field("id", name)
- .endObject();
- }
-
- /**
- * Storage Object JSON representation as defined in
- * https://cloud.google.com/storage/docs/json_api/v1/objects#resource
- */
- private static XContentBuilder buildObjectResource(final String bucket, final String name, final byte[] bytes) throws IOException {
- return buildObjectResource(jsonBuilder(), bucket, name, bytes);
- }
-
- /**
- * Storage Object JSON representation as defined in
- * https://cloud.google.com/storage/docs/json_api/v1/objects#resource
- */
- private static XContentBuilder buildObjectResource(final XContentBuilder builder,
- final String bucket,
- final String name,
- final byte[] bytes) throws IOException {
- return builder.startObject()
- .field("kind", "storage#object")
- .field("id", String.join("/", bucket, name))
- .field("name", name)
- .field("bucket", bucket)
- .field("size", String.valueOf(bytes.length))
- .endObject();
- }
-}
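
For reference, the batch endpoint deleted here (and reimplemented in the fixture above) consumes a multipart/mixed body in which every part wraps exactly one HTTP request. Below is a small sketch that builds such a body for two DELETE requests, following the commented example in the handler; the bucket and object names are placeholders only.

public class BatchRequestBodySketch {

    public static void main(String[] args) {
        final String boundary = "__END_OF_PART__";
        final StringBuilder body = new StringBuilder();

        for (String object : new String[] {"foo%2Ftest", "bar%2Ftest"}) {
            body.append("--").append(boundary).append("\r\n");
            body.append("Content-Type: application/http\r\n");
            body.append("content-transfer-encoding: binary\r\n");
            body.append("\r\n");
            // One batched request per part: method, URL and HTTP version on a single line
            body.append("DELETE https://www.googleapis.com/storage/v1/b/bucket_test/o/")
                .append(object)
                .append(" HTTP/1.1\r\n");
            body.append("\r\n");
        }
        body.append("--").append(boundary).append("--");

        System.out.println(body);
    }
}
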
diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle
index 5e288899021a1..dbbffdebded47 100644
--- a/plugins/repository-s3/qa/amazon-s3/build.gradle
+++ b/plugins/repository-s3/qa/amazon-s3/build.gradle
@@ -31,12 +31,6 @@ integTestCluster {
plugin ':plugins:repository-s3'
}
-forbiddenApisTest {
- // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
-}
-
boolean useFixture = false
String s3AccessKey = System.getenv("amazon_s3_access_key")
@@ -54,7 +48,7 @@ if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) {
/** A task to start the AmazonS3Fixture which emulates a S3 service **/
task s3Fixture(type: AntFixture) {
- dependsOn compileTestJava
+ dependsOn testClasses
env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
executable = new File(project.runtimeJavaHome, 'bin/java')
args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket
@@ -64,6 +58,7 @@ Map expansions = [
'bucket': s3Bucket,
'base_path': s3BasePath
]
+
processTestResources {
inputs.properties(expansions)
MavenFilteringHack.filter(it, expansions)
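
The s3Fixture task above amounts to launching the fixture class in a separate JVM with the test runtime classpath and two arguments: a working directory and a bucket name. The sketch below is a purely illustrative plain-Java equivalent of that launch; every path and the bucket name are placeholders that Gradle supplies in the real build.

public class S3FixtureLaunchSketch {

    public static void main(String[] args) throws Exception {
        final ProcessBuilder launcher = new ProcessBuilder(
            "/path/to/runtimeJavaHome/bin/java",                  // executable from project.runtimeJavaHome
            "org.elasticsearch.repositories.s3.AmazonS3Fixture",  // fixture entry point
            "/path/to/baseDir",                                   // working directory for the pid/ports files
            "bucket_test");                                       // bucket the fixture creates at startup
        // The classpath is passed through the environment, as in the AntFixture task
        launcher.environment().put("CLASSPATH", "/path/to/test/runtime/classpath");
        launcher.inheritIO();

        System.out.println("launching: " + String.join(" ", launcher.command()));
        // launcher.start(); // started (and torn down) by the build in the real setup
    }
}
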
diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
index cf123f85d98a9..20e21675acb79 100644
--- a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
+++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
@@ -18,132 +18,423 @@
*/
package org.elasticsearch.repositories.s3;
-import com.sun.net.httpserver.HttpExchange;
-import com.sun.net.httpserver.HttpHandler;
-import com.sun.net.httpserver.HttpServer;
-import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.test.fixture.AbstractHttpFixture;
+import com.amazonaws.util.DateUtils;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.mocksocket.MockHttpServer;
-import org.elasticsearch.repositories.s3.AmazonS3TestServer.Response;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
-import java.io.ByteArrayOutputStream;
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.SocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singleton;
-import static java.util.Collections.singletonList;
/**
- * {@link AmazonS3Fixture} is a fixture that emulates a S3 service.
- *
- * It starts an asynchronous socket server that binds to a random local port. The server parses
- * HTTP requests and uses a {@link AmazonS3TestServer} to handle them before returning
- * them to the client as HTTP responses.
+ * {@link AmazonS3Fixture} emulates an AWS S3 service.
+ *
+ * The implementation is based on official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/.
*/
-public class AmazonS3Fixture {
+public class AmazonS3Fixture extends AbstractHttpFixture {
- public static void main(String[] args) throws Exception {
+ /** List of the buckets stored on this test server **/
+ private final Map buckets = ConcurrentCollections.newConcurrentMap();
+
+ /** Request handlers for the requests made by the S3 client **/
+ private final PathTrie handlers;
+
+ /**
+ * Creates a {@link AmazonS3Fixture}
+ */
+ private AmazonS3Fixture(final String workingDir, final String bucket) {
+ super(workingDir);
+ this.buckets.put(bucket, new Bucket(bucket));
+ this.handlers = defaultHandlers(buckets);
+ }
+
+ @Override
+ protected Response handle(final Request request) throws IOException {
+ final RequestHandler handler = handlers.retrieve(request.getMethod() + " " + request.getPath(), request.getParameters());
+ if (handler != null) {
+ final String authorization = request.getHeader("Authorization");
+ if (authorization == null
+ || (authorization.length() > 0 && authorization.contains("s3_integration_test_access_key") == false)) {
+ return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", "");
+ }
+ return handler.handle(request);
+ }
+ return null;
+ }
+
+ public static void main(final String[] args) throws Exception {
if (args == null || args.length != 2) {
throw new IllegalArgumentException("AmazonS3Fixture ");
}
- final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
- final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+ final AmazonS3Fixture fixture = new AmazonS3Fixture(args[0], args[1]);
+ fixture.listen();
+ }
- try {
- final Path workingDirectory = workingDir(args[0]);
- /// Writes the PID of the current Java process in a `pid` file located in the working directory
- writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+ /** Builds the default request handlers **/
+ private static PathTrie defaultHandlers(final Map buckets) {
+ final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER);
- final String addressAndPort = addressToString(httpServer.getAddress());
- // Writes the address and port of the http server in a `ports` file located in the working directory
- writeFile(workingDirectory, "ports", addressAndPort);
+ // HEAD Object
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
+ objectsPaths("HEAD /{bucket}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String bucketName = request.getParam("bucket");
- // Emulates S3
- final String storageUrl = "http://" + addressAndPort;
- final AmazonS3TestServer storageTestServer = new AmazonS3TestServer(storageUrl);
- storageTestServer.createBucket(args[1]);
+ final Bucket bucket = buckets.get(bucketName);
+ if (bucket == null) {
+ return newBucketNotFoundError(request.getId(), bucketName);
+ }
- httpServer.createContext("/", new ResponseHandler(storageTestServer));
- httpServer.start();
+ final String objectName = objectName(request.getParameters());
+ for (Map.Entry object : bucket.objects.entrySet()) {
+ if (object.getKey().equals(objectName)) {
+ return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ }
+ }
+ return newObjectNotFoundError(request.getId(), objectName);
+ })
+ );
- // Wait to be killed
- Thread.sleep(Long.MAX_VALUE);
+ // PUT Object
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ objectsPaths("PUT /{bucket}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String destBucketName = request.getParam("bucket");
- } finally {
- httpServer.stop(0);
- }
- }
+ final Bucket destBucket = buckets.get(destBucketName);
+ if (destBucket == null) {
+ return newBucketNotFoundError(request.getId(), destBucketName);
+ }
- @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
- private static Path workingDir(final String dir) {
- return Paths.get(dir);
- }
+ final String destObjectName = objectName(request.getParameters());
+
+ // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip"
+ // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here.
+ //
+ // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+ //
+ String headerDecodedContentLength = request.getHeader("X-amz-decoded-content-length");
+ if (headerDecodedContentLength != null) {
+ int contentLength = Integer.valueOf(headerDecodedContentLength);
+
+ // Chunked requests have a payload like this:
+ //
+ // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8
+ // ... bytes of data ....
+ // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4
+ //
+ try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(request.getBody()))) {
+ int b;
+ // Moves to the end of the first signature line
+ while ((b = inputStream.read()) != -1) {
+ if (b == '\n') {
+ break;
+ }
+ }
+
+ final byte[] bytes = new byte[contentLength];
+ inputStream.read(bytes, 0, contentLength);
+
+ destBucket.objects.put(destObjectName, bytes);
+ return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ }
+ }
+
+ return newInternalError(request.getId(), "Something is wrong with this PUT request");
+ })
+ );
+
+ // DELETE Object
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
+ objectsPaths("DELETE /{bucket}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String bucketName = request.getParam("bucket");
+
+ final Bucket bucket = buckets.get(bucketName);
+ if (bucket == null) {
+ return newBucketNotFoundError(request.getId(), bucketName);
+ }
+
+ final String objectName = objectName(request.getParameters());
+ if (bucket.objects.remove(objectName) != null) {
+ return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ }
+ return newObjectNotFoundError(request.getId(), objectName);
+ })
+ );
+
+ // GET Object
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
+ objectsPaths("GET /{bucket}").forEach(path ->
+ handlers.insert(path, (request) -> {
+ final String bucketName = request.getParam("bucket");
+
+ final Bucket bucket = buckets.get(bucketName);
+ if (bucket == null) {
+ return newBucketNotFoundError(request.getId(), bucketName);
+ }
+
+ final String objectName = objectName(request.getParameters());
+ if (bucket.objects.containsKey(objectName)) {
+ return new Response(RestStatus.OK.getStatus(), contentType("application/octet-stream"), bucket.objects.get(objectName));
+
+ }
+ return newObjectNotFoundError(request.getId(), objectName);
+ })
+ );
+
+ // HEAD Bucket
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html
+ handlers.insert("HEAD /{bucket}", (request) -> {
+ String bucket = request.getParam("bucket");
+ if (Strings.hasText(bucket) && buckets.containsKey(bucket)) {
+ return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ } else {
+ return newBucketNotFoundError(request.getId(), bucket);
+ }
+ });
+
+ // GET Bucket (List Objects) Version 1
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
+ handlers.insert("GET /{bucket}/", (request) -> {
+ final String bucketName = request.getParam("bucket");
+
+ final Bucket bucket = buckets.get(bucketName);
+ if (bucket == null) {
+ return newBucketNotFoundError(request.getId(), bucketName);
+ }
+
+ String prefix = request.getParam("prefix");
+ if (prefix == null) {
+ prefix = request.getHeader("Prefix");
+ }
+ return newListBucketResultResponse(request.getId(), bucket, prefix);
+ });
+
+ // Delete Multiple Objects
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
+ handlers.insert("POST /", (request) -> {
+ final List deletes = new ArrayList<>();
+ final List errors = new ArrayList<>();
- private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
- final Path tempPidFile = Files.createTempFile(dir, null, null);
- Files.write(tempPidFile, singleton(content));
- Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+ if (request.getParam("delete") != null) {
+ // The request body is something like:
+ //
+ String requestBody = Streams.copyToString(new InputStreamReader(new ByteArrayInputStream(request.getBody()), UTF_8));
+ if (requestBody.startsWith("")) {
+ final String startMarker = "";
+ final String endMarker = "";
+
+ int offset = 0;
+ while (offset != -1) {
+ offset = requestBody.indexOf(startMarker, offset);
+ if (offset > 0) {
+ int closingOffset = requestBody.indexOf(endMarker, offset);
+ if (closingOffset != -1) {
+ offset = offset + startMarker.length();
+ final String objectName = requestBody.substring(offset, closingOffset);
+
+ boolean found = false;
+ for (Bucket bucket : buckets.values()) {
+ if (bucket.objects.remove(objectName) != null) {
+ found = true;
+ }
+ }
+
+ if (found) {
+ deletes.add(objectName);
+ } else {
+ errors.add(objectName);
+ }
+ }
+ }
+ }
+ return newDeleteResultResponse(request.getId(), deletes, errors);
+ }
+ }
+ return newInternalError(request.getId(), "Something is wrong with this POST multiple deletes request");
+ });
+
+ return handlers;
}
- private static String addressToString(final SocketAddress address) {
- final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
- if (inetSocketAddress.getAddress() instanceof Inet6Address) {
- return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
- } else {
- return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+ /**
+ * Represents an S3 bucket.
+ */
+ static class Bucket {
+
+ /** Bucket name **/
+ final String name;
+
+ /** Blobs contained in the bucket **/
+ final Map objects;
+
+ Bucket(final String name) {
+ this.name = Objects.requireNonNull(name);
+ this.objects = ConcurrentCollections.newConcurrentMap();
}
}
- static class ResponseHandler implements HttpHandler {
+ /**
+ * Expands a path like "http://host:port/{bucket}" into 10 derived paths like:
+ * - http://host:port/{bucket}/{path0}
+ * - http://host:port/{bucket}/{path0}/{path1}
+ * - http://host:port/{bucket}/{path0}/{path1}/{path2}
+ * - etc
+ */
+ private static List objectsPaths(final String path) {
+ final List paths = new ArrayList<>();
+ String p = path;
+ for (int i = 0; i < 10; i++) {
+ p = p + "/{path" + i + "}";
+ paths.add(p);
+ }
+ return paths;
+ }
- private final AmazonS3TestServer storageServer;
+ /**
+ * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
+ *
+ * This is the counterpart of {@link #objectsPaths(String)}
+ */
+ private static String objectName(final Map params) {
+ final StringBuilder name = new StringBuilder();
+ for (int i = 0; i < 10; i++) {
+ String value = params.getOrDefault("path" + i, null);
+ if (value != null) {
+ if (name.length() > 0) {
+ name.append('/');
+ }
+ name.append(value);
+ }
+ }
+ return name.toString();
+ }
- private ResponseHandler(final AmazonS3TestServer storageServer) {
- this.storageServer = storageServer;
+ /**
+ * S3 ListBucketResult Response
+ */
+ private static Response newListBucketResultResponse(final long requestId, final Bucket bucket, final String prefix) {
+ final String id = Long.toString(requestId);
+ final StringBuilder response = new StringBuilder();
+ response.append("");
+ response.append("");
+ response.append("");
+ if (prefix != null) {
+ response.append(prefix);
}
+ response.append("");
+ response.append("");
+ response.append("1000");
+ response.append("false");
- @Override
- public void handle(HttpExchange exchange) throws IOException {
- String method = exchange.getRequestMethod();
- String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath();
- String query = exchange.getRequestURI().getRawQuery();
- Map> headers = exchange.getRequestHeaders();
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- Streams.copy(exchange.getRequestBody(), out);
-
- Response storageResponse = null;
-
- final String userAgent = exchange.getRequestHeaders().getFirst("User-Agent");
- if (userAgent != null && userAgent.startsWith("Apache Ant")) {
- // This is a request made by the AntFixture, just reply "OK"
- storageResponse = new Response(RestStatus.OK, emptyMap(), "text/plain; charset=utf-8", "OK".getBytes(UTF_8));
- } else {
- // Otherwise simulate a S3 response
- storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray());
+ int count = 0;
+ for (Map.Entry object : bucket.objects.entrySet()) {
+ String objectName = object.getKey();
+ if (prefix == null || objectName.startsWith(prefix)) {
+ response.append("");
+ response.append("").append(objectName).append("");
+ response.append("").append(DateUtils.formatISO8601Date(new Date())).append("");
+ response.append(""").append(count++).append(""");
+ response.append("").append(object.getValue().length).append("");
+ response.append("");
}
+ }
+ response.append("");
- Map> responseHeaders = exchange.getResponseHeaders();
- responseHeaders.put("Content-Type", singletonList(storageResponse.contentType));
- storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
- exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length);
- if (storageResponse.body.length > 0) {
- exchange.getResponseBody().write(storageResponse.body);
- }
- exchange.close();
+ final Map headers = new HashMap<>(contentType("application/xml"));
+ headers.put("x-amz-request-id", id);
+
+ return new Response(RestStatus.OK.getStatus(), headers, response.toString().getBytes(UTF_8));
+ }
+
+ /**
+ * S3 DeleteResult Response
+ */
+ private static Response newDeleteResultResponse(final long requestId,
+ final List deletedObjects,
+ final List ignoredObjects) {
+ final String id = Long.toString(requestId);
+
+ final StringBuilder response = new StringBuilder();
+ response.append("");
+ response.append("");
+ for (String deletedObject : deletedObjects) {
+ response.append("");
+ response.append("").append(deletedObject).append("");
+ response.append("");
+ }
+ for (String ignoredObject : ignoredObjects) {
+ response.append("");
+ response.append("").append(ignoredObject).append("");
+ response.append("NoSuchKey
");
+ response.append("");
}
+ response.append("");
+
+ final Map headers = new HashMap<>(contentType("application/xml"));
+ headers.put("x-amz-request-id", id);
+
+ return new Response(RestStatus.OK.getStatus(), headers, response.toString().getBytes(UTF_8));
+ }
+
+ private static Response newBucketNotFoundError(final long requestId, final String bucket) {
+ return newError(requestId, RestStatus.NOT_FOUND, "NoSuchBucket", "The specified bucket does not exist", bucket);
+ }
+
+ private static Response newObjectNotFoundError(final long requestId, final String object) {
+ return newError(requestId, RestStatus.NOT_FOUND, "NoSuchKey", "The specified key does not exist", object);
+ }
+
+ private static Response newInternalError(final long requestId, final String resource) {
+ return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "We encountered an internal error", resource);
+ }
+
+ /**
+ * S3 Error
+ *
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+ */
+ private static Response newError(final long requestId,
+ final RestStatus status,
+ final String code,
+ final String message,
+ final String resource) {
+ final String id = Long.toString(requestId);
+ final StringBuilder response = new StringBuilder();
+ response.append("");
+ response.append("");
+ response.append("").append(code).append("
");
+ response.append("").append(message).append("");
+ response.append("").append(resource).append("");
+ response.append("").append(id).append("");
+ response.append("");
+
+ final Map headers = new HashMap<>(contentType("application/xml"));
+ headers.put("x-amz-request-id", id);
+
+ return new Response(status.getStatus(), headers, response.toString().getBytes(UTF_8));
}
}
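
The PUT Object handler above strips the aws-chunked framing by skipping the first signature line and then reading exactly "x-amz-decoded-content-length" bytes of payload. Here is a minimal sketch of that round trip with a hand-built single-chunk body; the chunk signatures are made up, since real ones are computed by the AWS SDK.

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class ChunkedPayloadSketch {

    public static void main(String[] args) throws IOException {
        final byte[] data = "hello s3".getBytes(StandardCharsets.UTF_8);
        // Single data chunk followed by the terminating zero-length chunk
        final String body = Integer.toHexString(data.length) + ";chunk-signature=0123abcd\r\n"
            + new String(data, StandardCharsets.UTF_8) + "\r\n"
            + "0;chunk-signature=4567ef89\r\n\r\n";

        try (BufferedInputStream in = new BufferedInputStream(
                new ByteArrayInputStream(body.getBytes(StandardCharsets.UTF_8)))) {
            int b;
            // Same strategy as the fixture: consume the "<size>;chunk-signature=..." header line
            while ((b = in.read()) != -1 && b != '\n') {
                // skip until the end of the first signature line
            }
            // ... then read the decoded content length worth of raw payload
            final byte[] decoded = new byte[data.length];
            in.read(decoded, 0, decoded.length);
            System.out.println(Arrays.equals(data, decoded)); // prints: true
        }
    }
}
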
diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java
deleted file mode 100644
index 029b28320d259..0000000000000
--- a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.repositories.s3;
-
-import com.amazonaws.util.DateUtils;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.common.path.PathTrie;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.rest.RestUtils;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Collections.emptyList;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singletonMap;
-
-/**
- * {@link AmazonS3TestServer} emulates a S3 service through a {@link #handle(String, String, String, Map, byte[])}
- * method that provides appropriate responses for specific requests like the real S3 platform would do.
- * It is largely based on official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/.
- */
-public class AmazonS3TestServer {
-
- private static byte[] EMPTY_BYTE = new byte[0];
- /** List of the buckets stored on this test server **/
- private final Map buckets = ConcurrentCollections.newConcurrentMap();
-
- /** Request handlers for the requests made by the S3 client **/
- private final PathTrie handlers;
-
- /** Server endpoint **/
- private final String endpoint;
-
- /** Increments for the requests ids **/
- private final AtomicLong requests = new AtomicLong(0);
-
- /**
- * Creates a {@link AmazonS3TestServer} with a custom endpoint
- */
- AmazonS3TestServer(final String endpoint) {
- this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
- this.handlers = defaultHandlers(endpoint, buckets);
- }
-
- /** Creates a bucket in the test server **/
- void createBucket(final String bucketName) {
- buckets.put(bucketName, new Bucket(bucketName));
- }
-
- public String getEndpoint() {
- return endpoint;
- }
-
- /**
- * Returns a response for the given request
- *
- * @param method the HTTP method of the request
- * @param path the path of the URL of the request
- * @param query the queryString of the URL of request
- * @param headers the HTTP headers of the request
- * @param body the HTTP request body
- * @return a {@link Response}
- * @throws IOException if something goes wrong
- */
- public Response handle(final String method,
- final String path,
- final String query,
- final Map> headers,
- byte[] body) throws IOException {
-
- final long requestId = requests.incrementAndGet();
-
- final Map params = new HashMap<>();
- if (query != null) {
- RestUtils.decodeQueryString(query, 0, params);
- }
-
- final List authorizations = headers.get("Authorization");
- if (authorizations == null
- || (authorizations.isEmpty() == false & authorizations.get(0).contains("s3_integration_test_access_key") == false)) {
- return newError(requestId, RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", "");
- }
-
- final RequestHandler handler = handlers.retrieve(method + " " + path, params);
- if (handler != null) {
- return handler.execute(params, headers, body, requestId);
- } else {
- return newInternalError(requestId, "No handler defined for request [method: " + method + ", path: " + path + "]");
- }
- }
-
- @FunctionalInterface
- interface RequestHandler {
-
- /**
- * Simulates the execution of a S3 request and returns a corresponding response.
- *
- * @param params the request's query string parameters
- * @param headers the request's headers
- * @param body the request body provided as a byte array
- * @param requestId a unique id for the incoming request
- * @return the corresponding response
- *
- * @throws IOException if something goes wrong
- */
- Response execute(Map params, Map> headers, byte[] body, long requestId) throws IOException;
- }
-
- /** Builds the default request handlers **/
- private static PathTrie defaultHandlers(final String endpoint, final Map buckets) {
- final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER);
-
- // HEAD Object
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
- objectsPaths("HEAD " + endpoint + "/{bucket}").forEach(path ->
- handlers.insert(path, (params, headers, body, id) -> {
- final String bucketName = params.get("bucket");
-
- final Bucket bucket = buckets.get(bucketName);
- if (bucket == null) {
- return newBucketNotFoundError(id, bucketName);
- }
-
- final String objectName = objectName(params);
- for (Map.Entry object : bucket.objects.entrySet()) {
- if (object.getKey().equals(objectName)) {
- return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
- }
- }
- return newObjectNotFoundError(id, objectName);
- })
- );
-
- // PUT Object
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
- objectsPaths("PUT " + endpoint + "/{bucket}").forEach(path ->
- handlers.insert(path, (params, headers, body, id) -> {
- final String destBucketName = params.get("bucket");
-
- final Bucket destBucket = buckets.get(destBucketName);
- if (destBucket == null) {
- return newBucketNotFoundError(id, destBucketName);
- }
-
- final String destObjectName = objectName(params);
-
- // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip"
- // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here.
- //
- // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
- //
- List headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList());
- if (headerDecodedContentLength.size() == 1) {
- int contentLength = Integer.valueOf(headerDecodedContentLength.get(0));
-
- // Chunked requests have a payload like this:
- //
- // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8
- // ... bytes of data ....
- // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4
- //
- try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) {
- int b;
- // Moves to the end of the first signature line
- while ((b = inputStream.read()) != -1) {
- if (b == '\n') {
- break;
- }
- }
-
- final byte[] bytes = new byte[contentLength];
- inputStream.read(bytes, 0, contentLength);
-
- destBucket.objects.put(destObjectName, bytes);
- return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
- }
- }
-
- return newInternalError(id, "Something is wrong with this PUT request");
- })
- );
-
- // DELETE Object
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
- objectsPaths("DELETE " + endpoint + "/{bucket}").forEach(path ->
- handlers.insert(path, (params, headers, body, id) -> {
- final String bucketName = params.get("bucket");
-
- final Bucket bucket = buckets.get(bucketName);
- if (bucket == null) {
- return newBucketNotFoundError(id, bucketName);
- }
-
- final String objectName = objectName(params);
- if (bucket.objects.remove(objectName) != null) {
- return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
- }
- return newObjectNotFoundError(id, objectName);
- })
- );
-
- // GET Object
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
- objectsPaths("GET " + endpoint + "/{bucket}").forEach(path ->
- handlers.insert(path, (params, headers, body, id) -> {
- final String bucketName = params.get("bucket");
-
- final Bucket bucket = buckets.get(bucketName);
- if (bucket == null) {
- return newBucketNotFoundError(id, bucketName);
- }
-
- final String objectName = objectName(params);
- if (bucket.objects.containsKey(objectName)) {
- return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(objectName));
-
- }
- return newObjectNotFoundError(id, objectName);
- })
- );
-
- // HEAD Bucket
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html
- handlers.insert("HEAD " + endpoint + "/{bucket}", (params, headers, body, id) -> {
- String bucket = params.get("bucket");
- if (Strings.hasText(bucket) && buckets.containsKey(bucket)) {
- return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
- } else {
- return newBucketNotFoundError(id, bucket);
- }
- });
-
- // GET Bucket (List Objects) Version 1
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
- handlers.insert("GET " + endpoint + "/{bucket}/", (params, headers, body, id) -> {
- final String bucketName = params.get("bucket");
-
- final Bucket bucket = buckets.get(bucketName);
- if (bucket == null) {
- return newBucketNotFoundError(id, bucketName);
- }
-
- String prefix = params.get("prefix");
- if (prefix == null) {
- List prefixes = headers.get("Prefix");
- if (prefixes != null && prefixes.size() == 1) {
- prefix = prefixes.get(0);
- }
- }
- return newListBucketResultResponse(id, bucket, prefix);
- });
-
- // Delete Multiple Objects
- //
- // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
- handlers.insert("POST " + endpoint + "/", (params, headers, body, id) -> {
- final List<String> deletes = new ArrayList<>();
- final List<String> errors = new ArrayList<>();
-
- if (params.containsKey("delete")) {
- // The request body is something like:
- // <Delete><Object><Key>...</Key></Object><Object><Key>...</Key></Object></Delete>
- String request = Streams.copyToString(new InputStreamReader(new ByteArrayInputStream(body), StandardCharsets.UTF_8));
- if (request.startsWith("<Delete>")) {
- final String startMarker = "<Key>";
- final String endMarker = "</Key>";
-
- int offset = 0;
- while (offset != -1) {
- offset = request.indexOf(startMarker, offset);
- if (offset > 0) {
- int closingOffset = request.indexOf(endMarker, offset);
- if (closingOffset != -1) {
- offset = offset + startMarker.length();
- final String objectName = request.substring(offset, closingOffset);
-
- boolean found = false;
- for (Bucket bucket : buckets.values()) {
- if (bucket.objects.remove(objectName) != null) {
- found = true;
- }
- }
-
- if (found) {
- deletes.add(objectName);
- } else {
- errors.add(objectName);
- }
- }
- }
- }
- return newDeleteResultResponse(id, deletes, errors);
- }
- }
- return newInternalError(id, "Something is wrong with this POST multiple deletes request");
- });
-
- return handlers;
- }
-
- /**
- * Represents an S3 bucket.
- */
- static class Bucket {
-
- /** Bucket name **/
- final String name;
-
- /** Blobs contained in the bucket **/
- final Map<String, byte[]> objects;
-
- Bucket(final String name) {
- this.name = Objects.requireNonNull(name);
- this.objects = ConcurrentCollections.newConcurrentMap();
- }
- }
-
- /**
- * Represents a HTTP Response.
- */
- static class Response {
-
- final RestStatus status;
- final Map<String, String> headers;
- final String contentType;
- final byte[] body;
-
- Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) {
- this.status = Objects.requireNonNull(status);
- this.headers = Objects.requireNonNull(headers);
- this.contentType = Objects.requireNonNull(contentType);
- this.body = Objects.requireNonNull(body);
- }
- }
-
- /**
- * Expands a path like "http://host:port/{bucket}" into 10 derived paths like:
- * - http://host:port/{bucket}/{path0}
- * - http://host:port/{bucket}/{path0}/{path1}
- * - http://host:port/{bucket}/{path0}/{path1}/{path2}
- * - etc
- */
- private static List<String> objectsPaths(final String path) {
- final List paths = new ArrayList<>();
- String p = path;
- for (int i = 0; i < 10; i++) {
- p = p + "/{path" + i + "}";
- paths.add(p);
- }
- return paths;
- }
-
- /**
- * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
- *
- * This is the counterpart of {@link #objectsPaths(String)}
- */
- private static String objectName(final Map<String, String> params) {
- final StringBuilder name = new StringBuilder();
- for (int i = 0; i < 10; i++) {
- String value = params.getOrDefault("path" + i, null);
- if (value != null) {
- if (name.length() > 0) {
- name.append('/');
- }
- name.append(value);
- }
- }
- return name.toString();
- }
-
- /**
- * S3 ListBucketResult Response
- */
- private static Response newListBucketResultResponse(final long requestId, final Bucket bucket, final String prefix) {
- final String id = Long.toString(requestId);
- final StringBuilder response = new StringBuilder();
- response.append("");
- response.append("");
- response.append("");
- if (prefix != null) {
- response.append(prefix);
- }
- response.append("");
- response.append("");
- response.append("1000");
- response.append("false");
-
- int count = 0;
- for (Map.Entry object : bucket.objects.entrySet()) {
- String objectName = object.getKey();
- if (prefix == null || objectName.startsWith(prefix)) {
- response.append("");
- response.append("").append(objectName).append("");
- response.append("").append(DateUtils.formatISO8601Date(new Date())).append("");
- response.append(""").append(count++).append(""");
- response.append("").append(object.getValue().length).append("");
- response.append("");
- }
- }
- response.append("");
- return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
- }
-
- /**
- * S3 DeleteResult Response
- */
- private static Response newDeleteResultResponse(final long requestId,
- final List<String> deletedObjects,
- final List<String> ignoredObjects) {
- final String id = Long.toString(requestId);
-
- final StringBuilder response = new StringBuilder();
- response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
- response.append("<DeleteResult>");
- for (String deletedObject : deletedObjects) {
- response.append("<Deleted>");
- response.append("<Key>").append(deletedObject).append("</Key>");
- response.append("</Deleted>");
- }
- for (String ignoredObject : ignoredObjects) {
- response.append("<Error>");
- response.append("<Key>").append(ignoredObject).append("</Key>");
- response.append("<Code>NoSuchKey</Code>");
- response.append("</Error>");
- }
- response.append("</DeleteResult>");
- return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
- }
-
- private static Response newBucketNotFoundError(final long requestId, final String bucket) {
- return newError(requestId, RestStatus.NOT_FOUND, "NoSuchBucket", "The specified bucket does not exist", bucket);
- }
-
- private static Response newObjectNotFoundError(final long requestId, final String object) {
- return newError(requestId, RestStatus.NOT_FOUND, "NoSuchKey", "The specified key does not exist", object);
- }
-
- private static Response newInternalError(final long requestId, final String resource) {
- return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "We encountered an internal error", resource);
- }
-
- /**
- * S3 Error
- *
- * https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
- */
- private static Response newError(final long requestId,
- final RestStatus status,
- final String code,
- final String message,
- final String resource) {
- final String id = Long.toString(requestId);
- final StringBuilder response = new StringBuilder();
- response.append("");
- response.append("");
- response.append("").append(code).append("
");
- response.append("").append(message).append("");
- response.append("").append(resource).append("");
- response.append("").append(id).append("");
- response.append("");
- return new Response(status, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
- }
-}
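For reference, the removed S3 test server resolved object keys by registering each bucket route under ten {pathX} templates and then stitching the matched segments back together. The standalone sketch below reproduces that round trip outside the fixture; the class name and the sample key are illustrative only, and a plain HashMap stands in for the parameters captured by the PathTrie.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ObjectsPathsExample {

    // Expands "GET /{bucket}" into "GET /{bucket}/{path0}", "GET /{bucket}/{path0}/{path1}", ... (10 variants).
    static List<String> objectsPaths(final String path) {
        final List<String> paths = new ArrayList<>();
        String p = path;
        for (int i = 0; i < 10; i++) {
            p = p + "/{path" + i + "}";
            paths.add(p);
        }
        return paths;
    }

    // Reassembles the object name from the matched {pathX} parameters, joining them with '/'.
    static String objectName(final Map<String, String> params) {
        final StringBuilder name = new StringBuilder();
        for (int i = 0; i < 10; i++) {
            final String value = params.get("path" + i);
            if (value != null) {
                if (name.length() > 0) {
                    name.append('/');
                }
                name.append(value);
            }
        }
        return name.toString();
    }

    public static void main(final String[] args) {
        System.out.println(objectsPaths("GET /{bucket}").get(1)); // GET /{bucket}/{path0}/{path1}
        final Map<String, String> params = new HashMap<>();
        params.put("path0", "snapshots");
        params.put("path1", "snap-0.dat");
        System.out.println(objectName(params)); // snapshots/snap-0.dat
    }
}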
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 235effdcf4492..17acf7c10f534 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -254,7 +254,6 @@
import org.elasticsearch.rest.action.admin.indices.RestFlushAction;
import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction;
import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction;
-import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction;
import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction;
import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction;
@@ -553,8 +552,6 @@ public void initRestHandlers(Supplier nodesInCluster) {
registerHandler.accept(new RestRestoreSnapshotAction(settings, restController));
registerHandler.accept(new RestDeleteSnapshotAction(settings, restController));
registerHandler.accept(new RestSnapshotsStatusAction(settings, restController));
-
- registerHandler.accept(new RestGetAllAliasesAction(settings, restController));
registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter));
registerHandler.accept(new RestIndicesStatsAction(settings, restController));
registerHandler.accept(new RestIndicesSegmentsAction(settings, restController));
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 72246ec8539fd..a1f0965d110b2 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -29,7 +29,6 @@
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java
index 00b7f4e6186a9..6207f333ceddf 100644
--- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java
@@ -20,7 +20,6 @@
package org.elasticsearch.action.support;
import org.apache.logging.log4j.Logger;
-import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
@@ -35,8 +34,6 @@
import java.util.concurrent.atomic.AtomicInteger;
-import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
-
public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
protected final ThreadPool threadPool;
diff --git a/server/src/main/java/org/elasticsearch/common/CheckedRunnable.java b/server/src/main/java/org/elasticsearch/common/CheckedRunnable.java
index 196eb53a878d5..721d7be14b4aa 100644
--- a/server/src/main/java/org/elasticsearch/common/CheckedRunnable.java
+++ b/server/src/main/java/org/elasticsearch/common/CheckedRunnable.java
@@ -19,8 +19,6 @@
package org.elasticsearch.common;
-import java.lang.Runnable;
-
/**
* A {@link Runnable}-like interface which allows throwing checked exceptions.
*/
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java
index f7735dd8197ac..c2263fc201e18 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java
@@ -30,8 +30,6 @@
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
-import org.apache.lucene.search.TopDocsCollector;
-import org.apache.lucene.search.TopScoreDocCollector;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -376,9 +374,9 @@ public float score() throws IOException {
double factor = computeScore(docId, subQueryScore);
float finalScore = scoreCombiner.combine(subQueryScore, factor, maxBoost);
if (finalScore == Float.NEGATIVE_INFINITY || Float.isNaN(finalScore)) {
- /**
- * These scores are invalid for score based {@link TopDocsCollector}s.
- * See {@link TopScoreDocCollector} for details.
+ /*
+ These scores are invalid for score based {@link org.apache.lucene.search.TopDocsCollector}s.
+ See {@link org.apache.lucene.search.TopScoreDocCollector} for details.
*/
throw new ElasticsearchException("function score query returned an invalid score: " + finalScore + " for doc: " + docId);
}
diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java
index 0821b176e75e6..e048512e6382c 100644
--- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java
+++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java
@@ -29,6 +29,7 @@
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.OutputStreamIndexOutput;
import org.apache.lucene.store.SimpleFSDirectory;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesArray;
@@ -76,6 +77,7 @@ public abstract class MetaDataStateFormat<T> {
private final String prefix;
private final Pattern stateFilePattern;
+ private static final Logger logger = Loggers.getLogger(MetaDataStateFormat.class);
/**
* Creates a new {@link MetaDataStateFormat} instance
@@ -134,6 +136,7 @@ public void close() throws IOException {
IOUtils.fsync(tmpStatePath, false); // fsync the state file
Files.move(tmpStatePath, finalStatePath, StandardCopyOption.ATOMIC_MOVE);
IOUtils.fsync(stateLocation, true);
+ logger.trace("written state to {}", finalStatePath);
for (int i = 1; i < locations.length; i++) {
stateLocation = locations[i].resolve(STATE_DIR_NAME);
Files.createDirectories(stateLocation);
@@ -145,12 +148,15 @@ public void close() throws IOException {
// we are on the same FileSystem / Partition here we can do an atomic move
Files.move(tmpPath, finalPath, StandardCopyOption.ATOMIC_MOVE);
IOUtils.fsync(stateLocation, true);
+ logger.trace("copied state to {}", finalPath);
} finally {
Files.deleteIfExists(tmpPath);
+ logger.trace("cleaned up {}", tmpPath);
}
}
} finally {
Files.deleteIfExists(tmpStatePath);
+ logger.trace("cleaned up {}", tmpStatePath);
}
cleanupOldFiles(prefix, fileName, locations);
}
@@ -211,20 +217,19 @@ protected Directory newDirectory(Path dir) throws IOException {
}
private void cleanupOldFiles(final String prefix, final String currentStateFile, Path[] locations) throws IOException {
- final DirectoryStream.Filter<Path> filter = new DirectoryStream.Filter<Path>() {
- @Override
- public boolean accept(Path entry) throws IOException {
- final String entryFileName = entry.getFileName().toString();
- return Files.isRegularFile(entry)
- && entryFileName.startsWith(prefix) // only state files
- && currentStateFile.equals(entryFileName) == false; // keep the current state file around
- }
+ final DirectoryStream.Filter<Path> filter = entry -> {
+ final String entryFileName = entry.getFileName().toString();
+ return Files.isRegularFile(entry)
+ && entryFileName.startsWith(prefix) // only state files
+ && currentStateFile.equals(entryFileName) == false; // keep the current state file around
};
// now clean up the old files
for (Path dataLocation : locations) {
+ logger.trace("cleanupOldFiles: cleaning up {}", dataLocation);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(dataLocation.resolve(STATE_DIR_NAME), filter)) {
for (Path stateFile : stream) {
Files.deleteIfExists(stateFile);
+ logger.trace("cleanupOldFiles: cleaned up {}", stateFile);
}
}
}
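The trace statements added to MetaDataStateFormat are only visible when trace logging is enabled for that class. A minimal sketch of how a test could switch it on, assuming the test framework's @TestLogging annotation is available; the test class, method name and empty body are placeholders, not part of this change.

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;

public class MetaDataStateFormatLoggingTests extends ESTestCase {

    // With TRACE enabled, writing state through a MetaDataStateFormat now emits
    // "written state to ...", "copied state to ..." and "cleaned up ..." messages.
    @TestLogging("org.elasticsearch.gateway.MetaDataStateFormat:TRACE")
    public void testTraceLoggingDuringStateWrite() throws Exception {
        // exercise a MetaDataStateFormat#write(...) call here and inspect the captured logs
    }
}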
diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
index 00b981175f228..fd1698bb00659 100644
--- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
+++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
@@ -123,6 +123,7 @@ public void writeIndex(String reason, IndexMetaData indexMetaData) throws IOExce
try {
IndexMetaData.FORMAT.write(indexMetaData,
nodeEnv.indexPaths(indexMetaData.getIndex()));
+ logger.trace("[{}] state written", index);
} catch (Exception ex) {
logger.warn(() -> new ParameterizedMessage("[{}]: failed to write index state", index), ex);
throw new IOException("failed to write state for [" + index + "]", ex);
@@ -136,6 +137,7 @@ void writeGlobalState(String reason, MetaData metaData) throws IOException {
logger.trace("[_global] writing state, reason [{}]", reason);
try {
MetaData.FORMAT.write(metaData, nodeEnv.nodeDataPaths());
+ logger.trace("[_global] state written");
} catch (Exception ex) {
logger.warn("[_global]: failed to write global state", ex);
throw new IOException("failed to write global state", ex);
diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
index d60d9cd9ce6b6..9671db35ced36 100644
--- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
+++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
@@ -51,7 +51,6 @@
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.Fuzziness;
-import org.elasticsearch.index.analysis.ShingleTokenFilterFactory;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
@@ -62,7 +61,7 @@
import static org.elasticsearch.common.lucene.search.Queries.newLenientFieldQuery;
import static org.elasticsearch.common.lucene.search.Queries.newUnmappedFieldQuery;
-public class MatchQuery {
+public class MatchQuery {
public enum Type implements Writeable {
/**
@@ -401,9 +400,9 @@ protected Query createFieldQuery(Analyzer analyzer, BooleanClause.Occur operator
// query based on the analysis chain.
try (TokenStream source = analyzer.tokenStream(field, queryText)) {
if (source.hasAttribute(DisableGraphAttribute.class)) {
- /**
- * A {@link TokenFilter} in this {@link TokenStream} disabled the graph analysis to avoid
- * paths explosion. See {@link ShingleTokenFilterFactory} for details.
+ /*
+ A {@link TokenFilter} in this {@link TokenStream} disabled the graph analysis to avoid
+ paths explosion. See {@link org.elasticsearch.index.analysis.ShingleTokenFilterFactory} for details.
*/
setEnableGraphQueries(false);
}
diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java
index 8e05e7bf08efa..b39ebd51f2bc8 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java
@@ -35,7 +35,6 @@
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.tasks.Task;
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java
index b24729f50d5f4..8a1e4e74e819e 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java
@@ -60,6 +60,8 @@ public class RestGetAliasesAction extends BaseRestHandler {
public RestGetAliasesAction(final Settings settings, final RestController controller) {
super(settings);
+ controller.registerHandler(GET, "/_alias", this);
+ controller.registerHandler(GET, "/_aliases", this);
controller.registerHandler(GET, "/_alias/{name}", this);
controller.registerHandler(HEAD, "/_alias/{name}", this);
controller.registerHandler(GET, "/{index}/_alias", this);
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllAliasesAction.java
deleted file mode 100644
index 87cadbafd8321..0000000000000
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllAliasesAction.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.rest.action.admin.indices;
-
-import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
-import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
-import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
-import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.cluster.metadata.AliasMetaData;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestController;
-import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.RestResponse;
-import org.elasticsearch.rest.action.RestBuilderListener;
-
-import java.io.IOException;
-import java.util.List;
-
-import static org.elasticsearch.rest.RestRequest.Method.GET;
-import static org.elasticsearch.rest.RestStatus.OK;
-
-/**
- * The REST handler for retrieving all aliases
- */
-public class RestGetAllAliasesAction extends BaseRestHandler {
-
- public RestGetAllAliasesAction(final Settings settings, final RestController controller) {
- super(settings);
- controller.registerHandler(GET, "/_alias", this);
- controller.registerHandler(GET, "/_aliases", this);
- }
-
- @Override
- public String getName() {
- return "get_all_aliases_action";
- }
-
- @Override
- public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
- final GetIndexRequest getIndexRequest = new GetIndexRequest();
- getIndexRequest.indices(Strings.EMPTY_ARRAY);
- getIndexRequest.features(Feature.ALIASES);
- getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions()));
- getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local()));
- getIndexRequest.humanReadable(request.paramAsBoolean("human", false));
- return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener<GetIndexResponse>(channel) {
-
- @Override
- public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception {
- builder.startObject();
- {
- for (final String index : response.indices()) {
- builder.startObject(index);
- {
- writeAliases(response.aliases().get(index), builder, request);
- }
- builder.endObject();
- }
- }
- builder.endObject();
-
- return new BytesRestResponse(OK, builder);
- }
-
- private void writeAliases(final List<AliasMetaData> aliases, final XContentBuilder builder,
- final Params params) throws IOException {
- builder.startObject("aliases");
- {
- if (aliases != null) {
- for (final AliasMetaData alias : aliases) {
- AliasMetaData.Builder.toXContent(alias, builder, params);
- }
- }
- }
- builder.endObject();
- }
- });
- }
-
-}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
index 7e39a6262d225..2cde321230ebc 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
@@ -25,7 +25,6 @@
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@@ -147,20 +146,20 @@ protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucket
finishLeaf();
boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR;
if (sortedDocsProducer != null) {
- /**
- * The producer will visit documents sorted by the leading source of the composite definition
- * and terminates when the leading source value is guaranteed to be greater than the lowest
- * composite bucket in the queue.
+ /*
+ The producer will visit documents sorted by the leading source of the composite definition
+ and terminates when the leading source value is guaranteed to be greater than the lowest
+ composite bucket in the queue.
*/
DocIdSet docIdSet = sortedDocsProducer.processLeaf(context.query(), queue, ctx, fillDocIdSet);
if (fillDocIdSet) {
entries.add(new Entry(ctx, docIdSet));
}
- /**
- * We can bypass search entirely for this segment, all the processing has been done in the previous call.
- * Throwing this exception will terminate the execution of the search for this root aggregation,
- * see {@link MultiCollector} for more details on how we handle early termination in aggregations.
+ /*
+ We can bypass search entirely for this segment, all the processing has been done in the previous call.
+ Throwing this exception will terminate the execution of the search for this root aggregation,
+ see {@link org.apache.lucene.search.MultiCollector} for more details on how we handle early termination in aggregations.
*/
throw new CollectionTerminatedException();
} else {
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java
index 4343a1ebca564..e5ff7abc68b34 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java
@@ -23,7 +23,6 @@
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.SourceFieldMapper;
diff --git a/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java b/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java
index 5ce8959601798..c19a14d836422 100644
--- a/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java
+++ b/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java
@@ -19,8 +19,6 @@
package org.elasticsearch.monitor.jvm;
-import java.lang.ProcessHandle;
-
class JvmPid {
static long getPid() {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
index 524d522153fe5..d9796847efa77 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
@@ -49,7 +49,6 @@
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
index 27230eb518ebe..fd6f68d4200da 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
@@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.admin.cluster.node.tasks;
-import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
index 24d9633bc5154..f3033b017db98 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
@@ -63,7 +63,6 @@
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
diff --git a/settings.gradle b/settings.gradle
index 592d7cc892bda..7a72baf1c4195 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -34,7 +34,6 @@ List projects = [
'server',
'server:cli',
'test:framework',
- 'test:fixtures:example-fixture',
'test:fixtures:hdfs-fixture',
'test:fixtures:krb5kdc-fixture',
'test:fixtures:old-elasticsearch',
diff --git a/test/fixtures/example-fixture/build.gradle b/test/fixtures/example-fixture/build.gradle
deleted file mode 100644
index ce562e89abb7f..0000000000000
--- a/test/fixtures/example-fixture/build.gradle
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-apply plugin: 'elasticsearch.build'
-test.enabled = false
-// Not published so no need to assemble
-tasks.remove(assemble)
-build.dependsOn.remove('assemble')
-
-dependenciesInfo.enabled = false
diff --git a/test/fixtures/example-fixture/src/main/java/example/ExampleTestFixture.java b/test/fixtures/example-fixture/src/main/java/example/ExampleTestFixture.java
deleted file mode 100644
index 96103d8eaa900..0000000000000
--- a/test/fixtures/example-fixture/src/main/java/example/ExampleTestFixture.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package example;
-
-import com.sun.net.httpserver.HttpServer;
-
-import java.lang.management.ManagementFactory;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
-import java.util.Collections;
-
-/** Crappy example test fixture that responds with TEST and closes the connection */
-public class ExampleTestFixture {
- public static void main(String args[]) throws Exception {
- if (args.length != 1) {
- throw new IllegalArgumentException("ExampleTestFixture ");
- }
- Path dir = Paths.get(args[0]);
-
- final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
- final HttpServer httpServer = HttpServer.create(socketAddress, 0);
-
- // write pid file
- Path tmp = Files.createTempFile(dir, null, null);
- String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
- Files.write(tmp, Collections.singleton(pid));
- Files.move(tmp, dir.resolve("pid"), StandardCopyOption.ATOMIC_MOVE);
-
- // write port file
- tmp = Files.createTempFile(dir, null, null);
- InetSocketAddress bound = httpServer.getAddress();
- if (bound.getAddress() instanceof Inet6Address) {
- Files.write(tmp, Collections.singleton("[" + bound.getHostString() + "]:" + bound.getPort()));
- } else {
- Files.write(tmp, Collections.singleton(bound.getHostString() + ":" + bound.getPort()));
- }
- Files.move(tmp, dir.resolve("ports"), StandardCopyOption.ATOMIC_MOVE);
-
- final byte[] response = "TEST\n".getBytes(StandardCharsets.UTF_8);
-
- // go time
- httpServer.createContext("/", exchange -> {
- try {
- exchange.sendResponseHeaders(200, response.length);
- exchange.getResponseBody().write(response);
- } finally {
- exchange.close();
- }
- });
- httpServer.start();
-
- // wait forever, until you kill me
- Thread.sleep(Long.MAX_VALUE);
- }
-}
diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index 4450ec54d0863..ab18e359458bd 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -74,7 +74,6 @@
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/fixture/AbstractHttpFixture.java b/test/framework/src/main/java/org/elasticsearch/test/fixture/AbstractHttpFixture.java
new file mode 100644
index 0000000000000..daa70298224d0
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/fixture/AbstractHttpFixture.java
@@ -0,0 +1,312 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.fixture;
+
+import com.sun.net.httpserver.HttpServer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.management.ManagementFactory;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+
+/**
+ * Base class for test fixtures that require a {@link HttpServer} to work.
+ */
+public abstract class AbstractHttpFixture {
+
+ protected static final Map<String, String> TEXT_PLAIN_CONTENT_TYPE = contentType("text/plain; charset=utf-8");
+ protected static final Map<String, String> JSON_CONTENT_TYPE = contentType("application/json; charset=utf-8");
+
+ protected static final byte[] EMPTY_BYTE = new byte[0];
+
+ /** Counter used to generate request ids **/
+ private final AtomicLong requests = new AtomicLong(0);
+
+ /** Current working directory of the fixture **/
+ private final Path workingDirectory;
+
+ protected AbstractHttpFixture(final String workingDir) {
+ this.workingDirectory = Paths.get(Objects.requireNonNull(workingDir));
+ }
+
+ /**
+ * Opens a {@link HttpServer} and starts listening on a random port.
+ */
+ public final void listen() throws IOException, InterruptedException {
+ final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+ final HttpServer httpServer = HttpServer.create(socketAddress, 0);
+
+ try {
+ // Writes the PID of the current Java process in a `pid` file located in the working directory
+ writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+
+ final String addressAndPort = addressToString(httpServer.getAddress());
+ // Writes the address and port of the http server in a `ports` file located in the working directory
+ writeFile(workingDirectory, "ports", addressAndPort);
+
+ httpServer.createContext("/", exchange -> {
+ try {
+ Response response;
+
+ // Check if this is a request made by the AntFixture
+ final String userAgent = exchange.getRequestHeaders().getFirst("User-Agent");
+ if (userAgent != null
+ && userAgent.startsWith("Apache Ant")
+ && "GET".equals(exchange.getRequestMethod())
+ && "/".equals(exchange.getRequestURI().getPath())) {
+ response = new Response(200, TEXT_PLAIN_CONTENT_TYPE, "OK".getBytes(UTF_8));
+
+ } else {
+ try {
+ final long requestId = requests.getAndIncrement();
+ final String method = exchange.getRequestMethod();
+
+
+ final Map<String, String> headers = new HashMap<>();
+ for (Map.Entry<String, List<String>> header : exchange.getRequestHeaders().entrySet()) {
+ headers.put(header.getKey(), exchange.getRequestHeaders().getFirst(header.getKey()));
+ }
+
+ final ByteArrayOutputStream body = new ByteArrayOutputStream();
+ try (InputStream requestBody = exchange.getRequestBody()) {
+ final byte[] buffer = new byte[1024];
+ int i;
+ while ((i = requestBody.read(buffer, 0, buffer.length)) != -1) {
+ body.write(buffer, 0, i);
+ }
+ body.flush();
+ }
+
+ final Request request = new Request(requestId, method, exchange.getRequestURI(), headers, body.toByteArray());
+ response = handle(request);
+
+ } catch (Exception e) {
+ final String error = e.getMessage() != null ? e.getMessage() : "Exception when processing the request";
+ response = new Response(500, singletonMap("Content-Type", "text/plain; charset=utf-8"), error.getBytes(UTF_8));
+ }
+ }
+
+ if (response == null) {
+ response = new Response(400, TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
+ }
+
+ response.headers.forEach((k, v) -> exchange.getResponseHeaders().put(k, singletonList(v)));
+ if (response.body.length > 0) {
+ exchange.sendResponseHeaders(response.status, response.body.length);
+ exchange.getResponseBody().write(response.body);
+ } else {
+ exchange.sendResponseHeaders(response.status, -1);
+ }
+ } finally {
+ exchange.close();
+ }
+ });
+ httpServer.start();
+
+ // Wait to be killed
+ Thread.sleep(Long.MAX_VALUE);
+
+ } finally {
+ httpServer.stop(0);
+ }
+ }
+
+ protected abstract Response handle(Request request) throws IOException;
+
+ @FunctionalInterface
+ public interface RequestHandler {
+ Response handle(Request request) throws IOException;
+ }
+
+ /**
+ * Represents a HTTP Response.
+ */
+ protected static class Response {
+
+ private final int status;
+ private final Map<String, String> headers;
+ private final byte[] body;
+
+ public Response(final int status, final Map<String, String> headers, final byte[] body) {
+ this.status = status;
+ this.headers = Objects.requireNonNull(headers);
+ this.body = Objects.requireNonNull(body);
+ }
+
+ public int getStatus() {
+ return status;
+ }
+
+ public Map<String, String> getHeaders() {
+ return headers;
+ }
+
+ public byte[] getBody() {
+ return body;
+ }
+
+ public String getContentType() {
+ for (String header : headers.keySet()) {
+ if (header.equalsIgnoreCase("Content-Type")) {
+ return headers.get(header);
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public String toString() {
+ return "Response{" +
+ "status=" + status +
+ ", headers=" + headers +
+ ", body=" + new String(body, UTF_8) +
+ '}';
+ }
+ }
+
+ /**
+ * Represents a HTTP Request.
+ */
+ protected static class Request {
+
+ private final long id;
+ private final String method;
+ private final URI uri;
+ private final Map<String, String> parameters;
+ private final Map<String, String> headers;
+ private final byte[] body;
+
+ public Request(final long id, final String method, final URI uri, final Map<String, String> headers, final byte[] body) {
+ this.id = id;
+ this.method = Objects.requireNonNull(method);
+ this.uri = Objects.requireNonNull(uri);
+ this.headers = Objects.requireNonNull(headers);
+ this.body = Objects.requireNonNull(body);
+
+ final Map<String, String> params = new HashMap<>();
+ if (uri.getQuery() != null && uri.getQuery().length() > 0) {
+ for (String param : uri.getQuery().split("&")) {
+ int i = param.indexOf("=");
+ if (i > 0) {
+ params.put(param.substring(0, i), param.substring(i + 1));
+ } else {
+ params.put(param, "");
+ }
+ }
+ }
+ this.parameters = params;
+ }
+
+ public long getId() {
+ return id;
+ }
+
+ public String getMethod() {
+ return method;
+ }
+
+ public Map<String, String> getHeaders() {
+ return headers;
+ }
+
+ public String getHeader(final String headerName) {
+ for (String header : headers.keySet()) {
+ if (header.equalsIgnoreCase(headerName)) {
+ return headers.get(header);
+ }
+ }
+ return null;
+ }
+
+ public byte[] getBody() {
+ return body;
+ }
+
+ public String getPath() {
+ return uri.getRawPath();
+ }
+
+ public Map<String, String> getParameters() {
+ return parameters;
+ }
+
+ public String getParam(final String paramName) {
+ for (String param : parameters.keySet()) {
+ if (param.equals(paramName)) {
+ return parameters.get(param);
+ }
+ }
+ return null;
+ }
+
+ public String getContentType() {
+ return getHeader("Content-Type");
+ }
+
+ @Override
+ public String toString() {
+ return "Request{" +
+ "method='" + method + '\'' +
+ ", uri=" + uri +
+ ", parameters=" + parameters +
+ ", headers=" + headers +
+ ", body=" + body +
+ '}';
+ }
+ }
+
+ private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
+ final Path tempPidFile = Files.createTempFile(dir, null, null);
+ Files.write(tempPidFile, singleton(content));
+ Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+ }
+
+ private static String addressToString(final SocketAddress address) {
+ final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
+ if (inetSocketAddress.getAddress() instanceof Inet6Address) {
+ return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
+ } else {
+ return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+ }
+ }
+
+ protected static Map<String, String> contentType(final String contentType) {
+ return singletonMap("Content-Type", contentType);
+ }
+}
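A concrete fixture built on the new base class only has to pass the working directory to the constructor and implement handle(). The sketch below is not part of the change; the class name, route and response body are made up for illustration.

import java.io.IOException;

import org.elasticsearch.test.fixture.AbstractHttpFixture;

import static java.nio.charset.StandardCharsets.UTF_8;

public class PingFixture extends AbstractHttpFixture {

    private PingFixture(final String workingDir) {
        super(workingDir);
    }

    @Override
    protected Response handle(final Request request) throws IOException {
        if ("GET".equals(request.getMethod()) && "/ping".equals(request.getPath())) {
            return new Response(200, TEXT_PLAIN_CONTENT_TYPE, "pong".getBytes(UTF_8));
        }
        return null; // AbstractHttpFixture answers null with an empty 400 response
    }

    public static void main(final String[] args) throws Exception {
        if (args.length != 1) {
            throw new IllegalArgumentException("PingFixture <working directory>");
        }
        final PingFixture fixture = new PingFixture(args[0]);
        fixture.listen(); // blocks until the process is killed
    }
}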
diff --git a/x-pack/docs/en/sql/language/data-types.asciidoc b/x-pack/docs/en/sql/language/data-types.asciidoc
index 322269bddaf8f..a01c2fda5c726 100644
--- a/x-pack/docs/en/sql/language/data-types.asciidoc
+++ b/x-pack/docs/en/sql/language/data-types.asciidoc
@@ -68,6 +68,7 @@ Consider the following `string` mapping:
}
}
----
+// NOTCONSOLE
The following SQL query:
diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle
index ac423c4281138..de4d3ada51aa6 100644
--- a/x-pack/plugin/build.gradle
+++ b/x-pack/plugin/build.gradle
@@ -43,7 +43,9 @@ subprojects {
final FileCollection classDirectories = project.files(files).filter { it.exists() }
doFirst {
- args('-cp', project.configurations.featureAwarePlugin.asPath, 'org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck')
+ String cp = project.configurations.featureAwarePlugin.asPath
+ cp = cp.replaceAll(":[^:]*/asm-debug-all-5.1.jar:", ":")
+ args('-cp', cp, 'org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck')
classDirectories.each { args it.getAbsolutePath() }
}
doLast {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java
index bfe9b0e3589ba..ee156847d063b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java
@@ -12,8 +12,6 @@
import java.io.IOException;
import java.util.Locale;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
/**
* Enum representing logical comparisons on doubles
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java
index 47d4d30a7c6e4..dfd7c886ebf42 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java
@@ -16,7 +16,6 @@
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.action.XPackInfoAction;
-import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import java.util.ArrayList;
import java.util.Arrays;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java
index aed6d2456014c..7f79ae35adac2 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java
@@ -14,7 +14,6 @@
import org.apache.xml.security.keys.content.X509Data;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.common.CheckedConsumer;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.Loggers;
diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle
index 1ef0a3a98ec6e..1daae6dc9f50a 100644
--- a/x-pack/qa/core-rest-tests-with-security/build.gradle
+++ b/x-pack/qa/core-rest-tests-with-security/build.gradle
@@ -15,6 +15,7 @@ integTestRunner {
['cat.aliases/10_basic/Empty cluster',
'index/10_with_id/Index with ID',
'indices.get_alias/10_basic/Get alias against closed indices',
+ 'indices.get_alias/20_empty/Check empty aliases when getting all aliases via /_alias',
'cat.templates/10_basic/No templates',
'cat.templates/10_basic/Sort templates',
'cat.templates/10_basic/Multiple template',
diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java
index f8d1dd5e2b717..6c999ca2a7291 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java
+++ b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java
@@ -95,7 +95,7 @@ private boolean getMonitoringUsageExportersDefined() throws Exception {
public void testHTTPExporterWithSSL() throws Exception {
// Ensures that the exporter is actually on
assertBusy(() -> assertThat("[_http] exporter is not defined", getMonitoringUsageExportersDefined(), is(true)));
-
+
// Checks that the monitoring index templates have been installed
assertBusy(() -> {
GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates(MONITORING_PATTERN).get();