diff --git a/.builder/actions/mock_server_setup.py b/.builder/actions/mock_server_setup.py
new file mode 100644
index 000000000..ee54e3472
--- /dev/null
+++ b/.builder/actions/mock_server_setup.py
@@ -0,0 +1,42 @@
+"""
+Setup local mock server for tests
+"""
+
+import Builder
+
+import os
+import sys
+import subprocess
+import atexit
+
+
+class MockServerSetup(Builder.Action):
+ """
+ Set up this machine for running the mock server test
+
+ This action should be run in the 'pre_build_steps' or 'build_steps' stage.
+ """
+
+ def run(self, env):
+ self.env = env
+ python_path = sys.executable
+ # install dependencies for the mock server
+ self.env.shell.exec(python_path,
+ '-m', 'pip', 'install', 'h11', 'trio', check=True)
+ # check that the deps can be imported correctly
+ self.env.shell.exec(python_path,
+ '-c', 'import h11, trio', check=True)
+
+ # set cmake flag so mock server tests are enabled
+ env.project.config['cmake_args'].append(
+ '-DENABLE_MOCK_SERVER_TESTS=ON')
+
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ server_dir = os.path.join(base_dir, "..", "..", "tests", "mock_s3_server")
+ os.chdir(server_dir)
+
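+ # Start the mock server as a background process; it keeps serving while the
+ # tests run and is terminated again when the interpreter exits (see below).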
+ p = subprocess.Popen([python_path, "mock_s3_server.py"])
+
+ @atexit.register
+ def close_mock_server():
+ p.terminate()
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cdb528c09..6e8387a43 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,6 +7,8 @@ if (POLICY CMP0069)
endif()
option(ASSERT_LOCK_HELD "Enable ASSERT_SYNCED_DATA_LOCK_HELD for checking thread issue" OFF)
+option(ENABLE_MOCK_SERVER_TESTS "Whether to run the integration tests that rely on a pre-configured local mock server" OFF)
+
if (ASSERT_LOCK_HELD)
add_definitions(-DASSERT_LOCK_HELD)
endif()
diff --git a/builder.json b/builder.json
index 9d1cbaafc..44a260e8e 100644
--- a/builder.json
+++ b/builder.json
@@ -16,5 +16,6 @@
],
"test_steps": [
"test"
- ]
+ ],
+ "pre_build_steps": ["mock-server-setup"]
}
diff --git a/source/s3_meta_request.c b/source/s3_meta_request.c
index b5e41b2fa..8b4ef76a1 100644
--- a/source/s3_meta_request.c
+++ b/source/s3_meta_request.c
@@ -319,7 +319,9 @@ void aws_s3_meta_request_set_fail_synced(
meta_request->synced_data.finish_result_set = true;
- if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS && failed_request != NULL) {
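+ /* Also capture the failed request's response for non-recoverable async errors
+ * (errors reported in the body of a 200 response), so callers can inspect the error body. */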
+ if ((error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS ||
+ error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR) &&
+ failed_request != NULL) {
aws_s3_meta_request_result_setup(
meta_request,
&meta_request->synced_data.finish_result,
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index bfbdae84c..2766bcb72 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -216,5 +216,12 @@ add_test_case(test_s3_list_bucket_init_mem_safety)
add_test_case(test_s3_list_bucket_init_mem_safety_optional_copies)
add_net_test_case(test_s3_list_bucket_valid)
+# Tests against local mock server
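+# They expect the mock server from tests/mock_s3_server to be running locally on port 8080.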
+if (ENABLE_MOCK_SERVER_TESTS)
+ add_net_test_case(multipart_upload_mock_server)
+ add_net_test_case(async_internal_error_from_complete_multipart_mock_server)
+ add_net_test_case(async_access_denied_from_complete_multipart_mock_server)
+endif()
+
set(TEST_BINARY_NAME ${PROJECT_NAME}-tests)
generate_test_driver(${TEST_BINARY_NAME})
diff --git a/tests/mock_s3_server/AbortMultipartUpload/default.json b/tests/mock_s3_server/AbortMultipartUpload/default.json
new file mode 100644
index 000000000..5b42b7270
--- /dev/null
+++ b/tests/mock_s3_server/AbortMultipartUpload/default.json
@@ -0,0 +1,6 @@
+{
+ "status": 204,
+ "headers": {"Connection": "keep-alive"},
+ "body": [
+ ]
+}
diff --git a/tests/mock_s3_server/CompleteMultipartUpload/async_access_denied_error.json b/tests/mock_s3_server/CompleteMultipartUpload/async_access_denied_error.json
new file mode 100644
index 000000000..ade62888d
--- /dev/null
+++ b/tests/mock_s3_server/CompleteMultipartUpload/async_access_denied_error.json
@@ -0,0 +1,14 @@
+{
+ "status": 200,
+ "headers": {"Connection": "close"},
+ "body": [
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
+ "",
+ "<Error>",
+ "<Code>AccessDenied</Code>",
+ "<Message>Access denied.</Message>",
+ "<RequestId>656c76696e6727732072657175657374</RequestId>",
+ "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>",
+ "</Error>"
+ ]
+ }
diff --git a/tests/mock_s3_server/CompleteMultipartUpload/async_internal_error.json b/tests/mock_s3_server/CompleteMultipartUpload/async_internal_error.json
new file mode 100644
index 000000000..01a871a52
--- /dev/null
+++ b/tests/mock_s3_server/CompleteMultipartUpload/async_internal_error.json
@@ -0,0 +1,14 @@
+{
+ "status": 200,
+ "headers": {"Connection": "close"},
+ "body": [
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
+ "",
+ "<Error>",
+ "<Code>InternalError</Code>",
+ "<Message>We encountered an internal error. Please try again.</Message>",
+ "<RequestId>656c76696e6727732072657175657374</RequestId>",
+ "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>",
+ "</Error>"
+ ]
+}
diff --git a/tests/mock_s3_server/CompleteMultipartUpload/default.json b/tests/mock_s3_server/CompleteMultipartUpload/default.json
new file mode 100644
index 000000000..44650c702
--- /dev/null
+++ b/tests/mock_s3_server/CompleteMultipartUpload/default.json
@@ -0,0 +1,13 @@
+{
+ "status": 200,
+ "headers": {"Connection": "close"},
+ "body": [
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
+ "<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">",
+ "<Location>http://default.s3.us-west-2.amazonaws.com/default</Location>",
+ "<Bucket>default</Bucket>",
+ "<Key>default</Key>",
+ "<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>",
+ "</CompleteMultipartUploadResult>"
+ ]
+}
diff --git a/tests/mock_s3_server/CreateMultipartUpload/default.json b/tests/mock_s3_server/CreateMultipartUpload/default.json
new file mode 100644
index 000000000..4a7809f9b
--- /dev/null
+++ b/tests/mock_s3_server/CreateMultipartUpload/default.json
@@ -0,0 +1,12 @@
+{
+ "status": 200,
+ "headers": {},
+ "body": [
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
+ "<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">",
+ "<Bucket>default</Bucket>",
+ "<Key>default</Key>",
+ "<UploadId>defaultID</UploadId>",
+ "</InitiateMultipartUploadResult>"
+ ]
+}
diff --git a/tests/mock_s3_server/README.md b/tests/mock_s3_server/README.md
new file mode 100644
index 000000000..c80420df1
--- /dev/null
+++ b/tests/mock_s3_server/README.md
@@ -0,0 +1,40 @@
+# Mock S3 server
+
+A **NON-TLS** mock S3 server based on [python-hyper/h11](https://github.com/python-hyper/h11) and [trio](http://trio.readthedocs.io/en/latest/index.html). The server implementation is based on the trio-server example from python-hyper/h11 [here](https://github.com/python-hyper/h11/blob/master/examples/trio-server.py). It only supports very basic mock responses for the requests it receives.
+
+## How to run the server
+
+Python 3.6+ required.
+
+- Install the h11 and trio Python modules: `python3 -m pip install h11 trio`
+- Run the server: `python3 ./mock_s3_server.py`
+
+### Supported Operations
+
+- CreateMultipartUpload
+- CompleteMultipartUpload
+- UploadPart
+- AbortMultipartUpload
+
+### Defined response
+
+The server reads the response definition from `./{OperationName}/{Key}.json`. The JSON file is formatted as follows:
+
+```json
+{
+ "status": 200,
+ "headers": {"Connection": "close"},
+ "body": [
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
+ "",
+ "<Error>",
+ "<Code>InternalError</Code>",
+ "<Message>We encountered an internal error. Please try again.</Message>",
+ "<RequestId>656c76696e6727732072657175657374</RequestId>",
+ "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>",
+ "</Error>"
+ ]
+}
+```
+
+Here you define the expected response status, headers, and body. If `{Key}.json` is not found on the file system, the server falls back to `default.json`.
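+
+For example, with the server running locally you can exercise a specific response file directly. The sketch below (illustrative, not part of the server code) assumes the default port 8080 and uses the `async_internal_error` key, which the server routes to `./CompleteMultipartUpload/async_internal_error.json` because the request is a `POST` with an `uploadId` query:
+
+```python
+import http.client
+
+# Connect to the local, non-TLS mock server.
+conn = http.client.HTTPConnection("localhost", 8080)
+# POST /{Key}?uploadId=... is handled as CompleteMultipartUpload;
+# the path selects CompleteMultipartUpload/async_internal_error.json.
+conn.request("POST", "/async_internal_error?uploadId=defaultID", body=b"")
+response = conn.getresponse()
+print(response.status, response.read().decode())
+conn.close()
+```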
diff --git a/tests/mock_s3_server/UploadPart/default.json b/tests/mock_s3_server/UploadPart/default.json
new file mode 100644
index 000000000..2ad6254c0
--- /dev/null
+++ b/tests/mock_s3_server/UploadPart/default.json
@@ -0,0 +1,6 @@
+{
+ "status": 200,
+ "headers": {"ETag": "b54357faf0632cce46e942fa68356b38", "Connection": "keep-alive"},
+ "body": [
+ ]
+}
diff --git a/tests/mock_s3_server/mock_s3_server.py b/tests/mock_s3_server/mock_s3_server.py
new file mode 100644
index 000000000..8a60f6269
--- /dev/null
+++ b/tests/mock_s3_server/mock_s3_server.py
@@ -0,0 +1,343 @@
+# A simple HTTP server implemented using h11 and Trio:
+# http://trio.readthedocs.io/en/latest/index.html
+#
+
+import datetime
+import email.utils
+import json
+from itertools import count
+from urllib.parse import parse_qs, urlparse
+import os
+from enum import Enum
+
+import trio
+
+import h11
+
+MAX_RECV = 2**16
+TIMEOUT = 10
+
+
+class S3Opts(Enum):
+ CreateMultipartUpload = 1
+ CompleteMultipartUpload = 2
+ UploadPart = 3
+ AbortMultipartUpload = 4
+
+
+base_dir = os.path.dirname(os.path.realpath(__file__))
+
+# We are using email.utils.format_datetime to generate the Date header.
+# It may sound weird, but it actually follows the RFC.
+# Please see: https://stackoverflow.com/a/59416334/14723771
+#
+# See also:
+# [1] https://www.rfc-editor.org/rfc/rfc9110#section-5.6.7
+# [2] https://www.rfc-editor.org/rfc/rfc7231#section-7.1.1.1
+# [3] https://www.rfc-editor.org/rfc/rfc5322#section-3.3
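+#
+# Example output: "Tue, 15 Nov 1994 08:12:31 GMT" (IMF-fixdate, always GMT).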
+
+
+def format_date_time(dt=None):
+ """Generate an RFC 7231 / RFC 9110 IMF-fixdate string"""
+ if dt is None:
+ dt = datetime.datetime.now(datetime.timezone.utc)
+ return email.utils.format_datetime(dt, usegmt=True)
+
+
+################################################################
+# I/O adapter: h11 <-> trio
+################################################################
+
+# The core of this could be factored out to be usable for trio-based clients
+# too, as well as servers. But as a simplified pedagogical example we don't
+# attempt this here.
+class TrioHTTPWrapper:
+ _next_id = count()
+
+ def __init__(self, stream):
+ self.stream = stream
+ self.conn = h11.Connection(h11.SERVER)
+ # Our Server: header
+ self.ident = " ".join(
+ ["h11-example-trio-server/{}".format(h11.__version__),
+ h11.PRODUCT_ID]
+ ).encode("ascii")
+ # A unique id for this connection, to include in debugging output
+ # (useful for understanding what's going on if there are multiple
+ # simultaneous clients).
+ self._obj_id = next(TrioHTTPWrapper._next_id)
+
+ async def send(self, event):
+ # The code below doesn't send ConnectionClosed, so we don't bother
+ # handling it here either -- it would require that we do something
+ # appropriate when 'data' is None.
+ assert type(event) is not h11.ConnectionClosed
+ data = self.conn.send(event)
+ try:
+ await self.stream.send_all(data)
+ except BaseException:
+ # If send_all raises an exception (especially trio.Cancelled),
+ # we have no choice but to give it up.
+ self.conn.send_failed()
+ raise
+
+ async def _read_from_peer(self):
+ if self.conn.they_are_waiting_for_100_continue:
+ self.info("Sending 100 Continue")
+ go_ahead = h11.InformationalResponse(
+ status_code=100, headers=self.basic_headers()
+ )
+ await self.send(go_ahead)
+ try:
+ data = await self.stream.receive_some(MAX_RECV)
+ except ConnectionError:
+ # They've stopped listening. Not much we can do about it here.
+ data = b""
+ self.conn.receive_data(data)
+
+ async def next_event(self):
+ while True:
+ event = self.conn.next_event()
+ if event is h11.NEED_DATA:
+ await self._read_from_peer()
+ continue
+ return event
+
+ async def shutdown_and_clean_up(self):
+ # When this method is called, it's because we definitely want to kill
+ # this connection, either as a clean shutdown or because of some kind
+ # of error or loss-of-sync bug, and we no longer care if that violates
+ # the protocol or not. So we ignore the state of self.conn, and just
+ # go ahead and do the shutdown on the socket directly. (If you're
+ # implementing a client you might prefer to send ConnectionClosed()
+ # and let it raise an exception if that violates the protocol.)
+ #
+ try:
+ await self.stream.send_eof()
+ except trio.BrokenResourceError:
+ # They're already gone, nothing to do
+ return
+ # Wait and read for a bit to give them a chance to see that we closed
+ # things, but eventually give up and just close the socket.
+ # XX FIXME: possibly we should set SO_LINGER to 0 here, so
+ # that in the case where the client has ignored our shutdown and
+ # declined to initiate the close themselves, we do a violent shutdown
+ # (RST) and avoid the TIME_WAIT?
+ # it looks like nginx never does this for keepalive timeouts, and only
+ # does it for regular timeouts (slow clients I guess?) if explicitly
+ # enabled ("Default: reset_timedout_connection off")
+ with trio.move_on_after(TIMEOUT):
+ try:
+ while True:
+ # Attempt to read until EOF
+ got = await self.stream.receive_some(MAX_RECV)
+ if not got:
+ break
+ except trio.BrokenResourceError:
+ pass
+ finally:
+ await self.stream.aclose()
+
+ def basic_headers(self):
+ # HTTP requires these headers in all responses (client would do
+ # something different here)
+ return [
+ ("Date", format_date_time().encode("ascii")),
+ ("Server", self.ident),
+ ]
+
+ def info(self, *args):
+ # Little debugging method
+ print("{}:".format(self._obj_id), *args)
+
+
+################################################################
+# Server main loop
+################################################################
+
+# General theory:
+#
+# If everything goes well:
+# - we'll get a Request
+# - our response handler will read the request body and send a full response
+# - that will either leave us in MUST_CLOSE (if the client doesn't
+# support keepalive) or DONE/DONE (if the client does).
+#
+# But then there are many, many different ways that things can go wrong
+# here. For example:
+# - we don't actually get a Request, but rather a ConnectionClosed
+# - exception is raised from somewhere (naughty client, broken
+# response handler, whatever)
+# - depending on what went wrong and where, we might or might not be
+# able to send an error response, and the connection might or
+# might not be salvagable after that
+# - response handler doesn't fully read the request or doesn't send a
+# full response
+#
+# But these all have one thing in common: they involve us leaving the
+# nice easy path up above. So we can just proceed on the assumption
+# that the nice easy thing is what's happening, and whenever something
+# goes wrong do our best to get back onto that path, and h11 will keep
+# track of how successful we were and raise new errors if things don't work
+# out.
+async def http_serve(stream):
+ wrapper = TrioHTTPWrapper(stream)
+ wrapper.info("Got new connection")
+ while True:
+ assert wrapper.conn.states == {
+ h11.CLIENT: h11.IDLE, h11.SERVER: h11.IDLE}
+
+ try:
+ with trio.fail_after(TIMEOUT):
+ wrapper.info("Server main loop waiting for request")
+ event = await wrapper.next_event()
+ wrapper.info("Server main loop got event:", event)
+ if type(event) is h11.Request:
+ await handle_mock_s3_request(wrapper, event)
+ except Exception as exc:
+ wrapper.info("Error during response handler: {!r}".format(exc))
+ await maybe_send_error_response(wrapper, exc)
+
+ if wrapper.conn.our_state is h11.MUST_CLOSE:
+ wrapper.info("connection is not reusable, so shutting down")
+ await wrapper.shutdown_and_clean_up()
+ return
+ else:
+ try:
+ wrapper.info("trying to re-use connection")
+ wrapper.conn.start_next_cycle()
+ except h11.ProtocolError:
+ states = wrapper.conn.states
+ wrapper.info("unexpected state", states, "-- bailing out")
+ await maybe_send_error_response(
+ wrapper, RuntimeError("unexpected state {}".format(states))
+ )
+ await wrapper.shutdown_and_clean_up()
+ return
+
+
+################################################################
+# Actual response handlers
+################################################################
+
+# Helper function
+
+def parse_request_path(request_path):
+ parsed_path = urlparse(request_path)
+ parsed_query = parse_qs(parsed_path.query)
+ return parsed_path, parsed_query
+
+
+async def send_simple_response(wrapper, status_code, content_type, body):
+ wrapper.info("Sending", status_code, "response with", len(body), "bytes")
+ headers = wrapper.basic_headers()
+ headers.append(("Content-Type", content_type))
+ headers.append(("Content-Length", str(len(body))))
+ res = h11.Response(status_code=status_code, headers=headers)
+ await wrapper.send(res)
+ await wrapper.send(h11.Data(data=body))
+ await wrapper.send(h11.EndOfMessage())
+
+
+async def send_response_from_json(wrapper, response_json_path, chunked=False):
+ with open(response_json_path, 'r') as f:
+ data = json.load(f)
+
+ status_code = data['status']
+ body = "\n".join(data['body'])
+ wrapper.info("Sending", status_code,
+ "response with", len(body), "bytes")
+
+ headers = wrapper.basic_headers()
+ for name, value in data['headers'].items():
+ headers.append((name, value))
+
+ if chunked:
+ headers.append(('Transfer-Encoding', "chunked"))
+ res = h11.Response(status_code=status_code, headers=headers)
+ await wrapper.send(res)
+ await wrapper.send(h11.Data(data=b"%X\r\n%s\r\n" % (len(body), body.encode())))
+ else:
+ headers.append(("Content-Length", str(len(body))))
+ res = h11.Response(status_code=status_code, headers=headers)
+ await wrapper.send(res)
+ await wrapper.send(h11.Data(data=body.encode()))
+
+ await wrapper.send(h11.EndOfMessage())
+
+
+async def send_mock_s3_response(wrapper, request_type, path):
+ response_file = os.path.join(
+ base_dir, request_type.name, f"{path[1:]}.json")
+ if not os.path.exists(response_file):
+ wrapper.info(response_file, "does not exist, using the default response")
+ response_file = os.path.join(
+ base_dir, request_type.name, "default.json")
+ await send_response_from_json(wrapper, response_file)
+
+
+async def maybe_send_error_response(wrapper, exc):
+ # If we can't send an error, oh well, nothing to be done
+ wrapper.info("trying to send error response...")
+ if wrapper.conn.our_state not in {h11.IDLE, h11.SEND_RESPONSE}:
+ wrapper.info("...but I can't, because our state is",
+ wrapper.conn.our_state)
+ return
+ try:
+ if isinstance(exc, h11.RemoteProtocolError):
+ status_code = exc.error_status_hint
+ elif isinstance(exc, trio.TooSlowError):
+ status_code = 408 # Request Timeout
+ else:
+ status_code = 500
+ body = str(exc).encode("utf-8")
+ await send_simple_response(
+ wrapper, status_code, "text/plain; charset=utf-8", body
+ )
+ except Exception as exc:
+ wrapper.info("error while sending error response:", exc)
+
+
+async def handle_mock_s3_request(wrapper, request):
+ parsed_path, parsed_query = parse_request_path(
+ request.target.decode("ascii"))
+ if request.method == b"POST":
+ if parsed_path.query == "uploads":
+ # POST /{Key+}?uploads HTTP/1.1 -- Create MPU
+ request_type = S3Opts.CreateMultipartUpload
+ else:
+ # POST /Key+?uploadId=UploadId HTTP/1.1 -- Complete MPU
+ request_type = S3Opts.CompleteMultipartUpload
+
+ elif request.method == b"PUT":
+ request_type = S3Opts.UploadPart
+ elif request.method == b"DELETE":
+ request_type = S3Opts.AbortMultipartUpload
+ else:
+ # TODO: support more request types.
+ request_type = S3Opts.CreateMultipartUpload
+
+ while True:
+ event = await wrapper.next_event()
+ if type(event) is h11.EndOfMessage:
+ break
+ assert type(event) is h11.Data
+
+ await send_mock_s3_response(
+ wrapper, request_type, parsed_path.path
+ )
+
+
+async def serve(port):
+ print("listening on http://localhost:{}".format(port))
+ try:
+ await trio.serve_tcp(http_serve, port)
+ except KeyboardInterrupt:
+ print("KeyboardInterrupt - shutting down")
+
+
+################################################################
+# Run the server
+################################################################
+if __name__ == "__main__":
+ trio.run(serve, 8080)
diff --git a/tests/s3_data_plane_tests.c b/tests/s3_data_plane_tests.c
index adabeb5d2..66cf9a61b 100644
--- a/tests/s3_data_plane_tests.c
+++ b/tests/s3_data_plane_tests.c
@@ -3185,7 +3185,7 @@ static int s_test_s3_bad_endpoint(struct aws_allocator *allocator, void *ctx) {
struct aws_byte_cursor test_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test_key");
- AWS_STATIC_STRING_FROM_LITERAL(invalid_host_name, "invalid_host_name");
+ AWS_STATIC_STRING_FROM_LITERAL(invalid_host_name, "invalid_host_name_totally_absolutely");
/* Construct a message that points to an invalid host name. Key can be anything. */
struct aws_http_message *message =
@@ -3201,7 +3201,9 @@ static int s_test_s3_bad_endpoint(struct aws_allocator *allocator, void *ctx) {
ASSERT_SUCCESS(aws_s3_tester_send_meta_request(&tester, client, &options, &meta_request_test_results, 0));
- ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_IO_DNS_INVALID_NAME);
+ ASSERT_TRUE(
+ meta_request_test_results.finished_error_code == AWS_IO_DNS_INVALID_NAME ||
+ meta_request_test_results.finished_error_code == AWS_IO_DNS_QUERY_FAILED);
aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);
diff --git a/tests/s3_mock_server_tests.c b/tests/s3_mock_server_tests.c
new file mode 100644
index 000000000..6c0f5195a
--- /dev/null
+++ b/tests/s3_mock_server_tests.c
@@ -0,0 +1,146 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_util.h"
+#include "aws/s3/s3_client.h"
+#include "s3_tester.h"
+#include <aws/io/stream.h>
+#include <aws/io/uri.h>
+#include <aws/testing/aws_test_harness.h>
+
+#define TEST_CASE(NAME) \
+ AWS_TEST_CASE(NAME, s_test_##NAME); \
+ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx)
+
+#define DEFINE_HEADER(NAME, VALUE) \
+ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), }
+
+TEST_CASE(multipart_upload_mock_server) {
+ (void)ctx;
+
+ struct aws_s3_tester tester;
+ ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
+ struct aws_s3_tester_client_options client_options = {
+ .part_size = MB_TO_BYTES(5),
+ .tls_usage = AWS_S3_TLS_DISABLED,
+ };
+
+ struct aws_s3_client *client = NULL;
+ ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));
+
+ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/default");
+
+ struct aws_s3_tester_meta_request_options put_options = {
+ .allocator = allocator,
+ .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+ .client = client,
+ .checksum_algorithm = AWS_SCA_CRC32,
+ .validate_get_response_checksum = false,
+ .put_options =
+ {
+ .object_size_mb = 10,
+ .object_path_override = object_path,
+ },
+ .mock_server = true,
+ };
+
+ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL));
+
+ aws_s3_client_release(client);
+ aws_s3_tester_clean_up(&tester);
+
+ return AWS_OP_SUCCESS;
+}
+
+TEST_CASE(async_internal_error_from_complete_multipart_mock_server) {
+ (void)ctx;
+
+ struct aws_s3_tester tester;
+ ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
+ struct aws_s3_tester_client_options client_options = {
+ .part_size = MB_TO_BYTES(5),
+ .tls_usage = AWS_S3_TLS_DISABLED,
+ };
+
+ struct aws_s3_client *client = NULL;
+ ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));
+
+ /* See ./mock_s3_server/CompleteMultipartUpload/async_internal_error.json for the response details. */
+ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/async_internal_error");
+
+ struct aws_s3_tester_meta_request_options put_options = {
+ .allocator = allocator,
+ .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+ .client = client,
+ .checksum_algorithm = AWS_SCA_CRC32,
+ .validate_get_response_checksum = false,
+ .put_options =
+ {
+ .object_size_mb = 10,
+ .object_path_override = object_path,
+ },
+ .mock_server = true,
+ .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE,
+ };
+ struct aws_s3_meta_request_test_results out_results;
+ aws_s3_meta_request_test_results_init(&out_results, allocator);
+
+ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results));
+
+ /* The internal error response is retried and the request eventually fails with AWS_ERROR_S3_INTERNAL_ERROR. */
+ ASSERT_UINT_EQUALS(out_results.finished_error_code, AWS_ERROR_S3_INTERNAL_ERROR);
+
+ aws_s3_meta_request_test_results_clean_up(&out_results);
+ aws_s3_client_release(client);
+ aws_s3_tester_clean_up(&tester);
+
+ return AWS_OP_SUCCESS;
+}
+
+TEST_CASE(async_access_denied_from_complete_multipart_mock_server) {
+ (void)ctx;
+
+ struct aws_s3_tester tester;
+ ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
+ struct aws_s3_tester_client_options client_options = {
+ .part_size = MB_TO_BYTES(5),
+ .tls_usage = AWS_S3_TLS_DISABLED,
+ };
+
+ struct aws_s3_client *client = NULL;
+ ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));
+
+ /* See ./mock_s3_server/CompleteMultipartUpload/async_access_denied_error.json for the response details. */
+ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/async_access_denied_error");
+
+ struct aws_s3_tester_meta_request_options put_options = {
+ .allocator = allocator,
+ .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+ .client = client,
+ .checksum_algorithm = AWS_SCA_CRC32,
+ .validate_get_response_checksum = false,
+ .put_options =
+ {
+ .object_size_mb = 10,
+ .object_path_override = object_path,
+ },
+ .mock_server = true,
+ .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE,
+ };
+ struct aws_s3_meta_request_test_results out_results;
+ aws_s3_meta_request_test_results_init(&out_results, allocator);
+
+ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results));
+
+ ASSERT_UINT_EQUALS(out_results.finished_error_code, AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR);
+ ASSERT_UINT_EQUALS(out_results.headers_response_status, AWS_S3_RESPONSE_STATUS_SUCCESS);
+ ASSERT_TRUE(out_results.error_response_body.len != 0);
+
+ aws_s3_meta_request_test_results_clean_up(&out_results);
+ aws_s3_client_release(client);
+ aws_s3_tester_clean_up(&tester);
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/tests/s3_tester.c b/tests/s3_tester.c
index d2383a099..a98dad648 100644
--- a/tests/s3_tester.c
+++ b/tests/s3_tester.c
@@ -11,6 +11,7 @@
#include "aws/s3/private/s3_util.h"
#include
#include
+#include <aws/io/uri.h>
#include
#include
#include
@@ -26,6 +27,8 @@
# pragma warning(disable : 4232) /* function pointer to dll symbol */
#endif
+const struct aws_byte_cursor g_mock_server_uri = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:8080/");
+
const struct aws_byte_cursor g_test_mrap_endpoint =
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("moujmk3izc19y.mrap.accesspoint.s3-global.amazonaws.com");
@@ -1179,6 +1182,8 @@ int aws_s3_tester_send_meta_request_with_options(
struct aws_s3_client *client = options->client;
+ struct aws_uri mock_server;
+ ASSERT_SUCCESS(aws_uri_init_parse(&mock_server, allocator, &g_mock_server_uri));
if (client == NULL) {
if (options->client_options != NULL) {
@@ -1206,6 +1211,10 @@ int aws_s3_tester_send_meta_request_with_options(
.checksum_config = &checksum_config,
};
+ if (options->mock_server) {
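+ /* Point the meta request at the parsed mock server URI instead of the default S3 endpoint. */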
+ meta_request_options.endpoint = &mock_server;
+ }
+
if (options->signing_config) {
meta_request_options.signing_config = options->signing_config;
}
@@ -1223,7 +1232,10 @@ int aws_s3_tester_send_meta_request_with_options(
}
struct aws_string *host_name = NULL;
- if (options->mrap_test) {
+ if (options->mock_server) {
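+ /* Use the mock server's host name (localhost) rather than building a real bucket endpoint. */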
+ const struct aws_byte_cursor *host_cursor = aws_uri_host_name(&mock_server);
+ host_name = aws_string_new_from_cursor(allocator, host_cursor);
+ } else if (options->mrap_test) {
host_name = aws_string_new_from_cursor(allocator, &g_test_mrap_endpoint);
} else {
host_name = aws_s3_tester_build_endpoint_string(allocator, bucket_name, &g_test_s3_region);
@@ -1432,6 +1444,7 @@ int aws_s3_tester_send_meta_request_with_options(
if (clean_up_local_tester) {
aws_s3_tester_clean_up(&local_tester);
}
+ aws_uri_clean_up(&mock_server);
return AWS_OP_SUCCESS;
}
diff --git a/tests/s3_tester.h b/tests/s3_tester.h
index 5b18af556..4f2b0082a 100644
--- a/tests/s3_tester.h
+++ b/tests/s3_tester.h
@@ -142,6 +142,9 @@ struct aws_s3_tester_meta_request_options {
/* Optional. Used to create a client when the specified client is NULL. If NULL, default options will be used. */
struct aws_s3_tester_client_options *client_options;
+ /* Optional. When enabled, the test runs against the local mock server instead of a real S3 endpoint. */
+ bool mock_server;
+
bool validate_get_response_checksum;
enum aws_s3_checksum_algorithm checksum_algorithm;
struct aws_array_list *validate_checksum_algorithms;
@@ -390,6 +393,8 @@ struct aws_input_stream *aws_s3_test_input_stream_new_with_value_type(
extern struct aws_s3_client_vtable g_aws_s3_client_mock_vtable;
+extern const struct aws_byte_cursor g_mock_server_uri;
+
extern const struct aws_byte_cursor g_test_body_content_type;
extern const struct aws_byte_cursor g_test_s3_region;
extern const struct aws_byte_cursor g_test_bucket_name;