Add basic skeleton for aws_s3 testing towards a mocked S3
onno-vos-dev committed Feb 23, 2023
1 parent cab7c80 commit d4d6542
Showing 5 changed files with 344 additions and 20 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/build.yml
@@ -20,6 +20,10 @@ jobs:
image: amazon/dynamodb-local:1.21.0
ports:
- 8000:8000
s3mock:
image: adobe/s3mock:2.11.0
ports:
- 9090:9090
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -45,6 +49,7 @@ jobs:
run: rebar3 ct
env:
DYNAMODB_HOST: ddb
S3MOCK_HOST: s3mock
- name: Create Cover Reports
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
53 changes: 33 additions & 20 deletions README.md
@@ -22,15 +22,7 @@

Here is an example of listing Amazon Kinesis streams. First of all,
start a shell with `rebar3 shell`, then:

```erlang
> Client = aws_client:make_client(<<"my-access-key-id">>, <<"my-secret-access-key">>, <<"eu-west-1">>),
[...]
> {ok, Result, _Response} = aws_kinesis:list_streams(Client, #{}),
[...]
> io:format("~p~n", [Result]).
#{<<"HasMoreStreams">> => false,<<"StreamNames">> => []}
```
### aws_s3

Here is another example, this time using a _temporary_ client, showing
how to upload a file to _S3_ and how to fetch it back:
@@ -47,12 +39,35 @@ how to upload a file to _S3_ and how to fetch it back:
> Content = maps:get(<<"Body">>, Response).
```
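
A minimal sketch of the full round trip, assuming the generated `aws_s3:put_object/4` and `aws_s3:get_object/3` functions and illustrative bucket and key names:

```erlang
> Client = aws_client:make_temporary_client(AccessKeyID, SecretAccessKey, Token, <<"eu-west-1">>),
[...]
> {ok, _, _} = aws_s3:put_object(Client, <<"my-bucket">>, <<"my-key">>,
                                 #{<<"Body">> => <<"hello world">>}),
[...]
> {ok, Response, _} = aws_s3:get_object(Client, <<"my-bucket">>, <<"my-key">>),
[...]
> Content = maps:get(<<"Body">>, Response).
<<"hello world">>
```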

Support for creating [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) is provided
through the `aws_s3_presigned_url` module.
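
A minimal sketch, assuming the module exports `make_presigned_v4_url/5` taking the client, HTTP method, expiry in seconds, bucket, and key (the bucket and key names are illustrative):

```erlang
> {ok, URL} = aws_s3_presigned_url:make_presigned_v4_url(Client, get, 3600,
                                                         <<"my-bucket">>, <<"my-key">>).
```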

### aws_kinesis

```erlang
> Client = aws_client:make_client(<<"my-access-key-id">>, <<"my-secret-access-key">>, <<"eu-west-1">>),
[...]
> {ok, Result, _Response} = aws_kinesis:list_streams(Client, #{}),
[...]
> io:format("~p~n", [Result]).
#{<<"HasMoreStreams">> => false,<<"StreamNames">> => []}
```

### retry options

Every API function that takes an `Options` argument accepts a `retry_options` key, which enables automatic retries.
Simply provide the following:

`[{retry_options, {exponential_with_jitter, {MaxAttempts, BaseSleepTime, CapSleepTime}}} | <other_options>]`

This implementation is based on [AWS: Exponential Backoff And Jitter](https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/).
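
For example, a sketch that retries up to 5 times with a 100 ms base sleep capped at 5000 ms (the values are illustrative, and the arity-3 `list_streams` variant that accepts an options list is assumed):

```erlang
> Options = [{retry_options, {exponential_with_jitter, {5, 100, 5000}}}],
> {ok, Result, _Response} = aws_kinesis:list_streams(Client, #{}, Options).
```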

## Installation

Simply add the library to your `rebar.config`:

```erlang
{deps, [{aws, "0.5.0", {pkg, aws_erlang}}]}.
{deps, [{aws, "1.0.0", {pkg, aws_erlang}}]}.
```

## Obtaining Credentials
@@ -79,7 +94,7 @@ Here is an example of how to obtain credentials:
The `aws_credentials` application can be installed by adding the following to your `rebar.config`:

```erlang
{deps, [{aws_credentials, "0.1.0"}]}.
{deps, [{aws_credentials, "0.1.10"}]}.
```
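
A minimal sketch of wiring the two together, assuming `aws_credentials:get_credentials/0` returns a map with these keys:

```erlang
> {ok, _} = application:ensure_all_started(aws_credentials),
> #{access_key_id := AccessKeyId,
    secret_access_key := SecretAccessKey,
    token := Token} = aws_credentials:get_credentials(),
> Client = aws_client:make_temporary_client(AccessKeyId, SecretAccessKey, Token, <<"eu-west-1">>).
```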

## Development
@@ -96,16 +111,8 @@ The rest of the code is manually written and used as support for the generated code.

### Build it locally

Add the [rebar3_docs](https://github.com/jfacorro/rebar3_docs) plugin to your global _rebar3_ config in `~/.config/rebar3/rebar.config`:

```
{plugins, [rebar3_docs]}.
```

Then simply:

```bash
$ rebar3 docs
$ rebar3 ex_doc
```

The docs will be available in `./doc`.
@@ -116,6 +123,12 @@ The docs will be available in `./doc`.
$ rebar3 eunit

```bash
$ docker-compose -f test/docker/docker-compose.yml up -d
$ rebar3 ct
$ docker-compose -f test/docker/docker-compose.yml down
```

## License

Copyright 2015 Jamshed Kakar <jkakar@kakar.ca>
115 changes: 115 additions & 0 deletions test/aws_s3_SUITE.erl
@@ -0,0 +1,115 @@
-module(aws_s3_SUITE).

-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").

%% Test server callbacks
-export([suite/0, all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2,
         end_per_testcase/2]).
%% Test cases
-export([bucket_exists/1, create_delete_bucket/1, delete/1, exists/1, list_objects/1,
         delete_objects/1, read/1, write/1]).

%%--------------------------------------------------------------------
%% Common test callback functions
suite() ->
    [{timetrap, {minutes, 1}}].

init_per_suite(Config) ->
    {ok, _} = application:ensure_all_started(aws),
    Bucket = <<"test-bucket">>,
    ok = aws_s3_util:create_bucket(client(), Bucket, []),
    [{bucket, Bucket} | Config].

end_per_suite(Config) ->
    Bucket = ?config(bucket, Config),
    ok = aws_s3_util:delete_bucket(client(), Bucket, []),
    ok.

init_per_testcase(_Case, Config) ->
    Config.

end_per_testcase(_Case, Config) ->
    Bucket = ?config(bucket, Config),
    cleanup_keys(Bucket),
    ok.

all() ->
    [write, read, exists, delete, delete_objects, list_objects, create_delete_bucket,
     bucket_exists].

%%--------------------------------------------------------------------
%% Test cases
write(Config) ->
    Bucket = ?config(bucket, Config),
    Key = unique_key(),
    ?assertEqual(ok, aws_s3_util:write(client(), Bucket, Key, <<"data">>, [])),
    ok.

read(Config) ->
    Bucket = ?config(bucket, Config),
    Key = unique_key(),
    ok = aws_s3_util:write(client(), Bucket, Key, <<"data">>, []),
    ?assertEqual({ok, <<"data">>}, aws_s3_util:read(client(), Bucket, Key, [])),
    ok.

exists(Config) ->
    Bucket = ?config(bucket, Config),
    Key = unique_key(),
    ?assertNot(aws_s3_util:exists(client(), Bucket, Key, [])),
    ok = aws_s3_util:write(client(), Bucket, Key, <<"data">>, []),
    ?assert(aws_s3_util:exists(client(), Bucket, Key, [])),
    ?assert(aws_s3_util:exists_min_size(client(), Bucket, Key, 1, [])),
    ok.

delete(Config) ->
    Bucket = ?config(bucket, Config),
    Key = unique_key(),
    ok = aws_s3_util:write(client(), Bucket, Key, <<"data">>, []),
    ?assertEqual(ok, aws_s3_util:delete(client(), Bucket, Key, [])),
    ok.

delete_objects(Config) ->
    Bucket = ?config(bucket, Config),
    Key1 = unique_key(),
    Key2 = unique_key(),
    ok = aws_s3_util:write(client(), Bucket, Key1, <<"data">>, []),
    ok = aws_s3_util:write(client(), Bucket, Key2, <<"data">>, []),
    ?assertEqual({ok, []}, aws_s3_util:delete_objects(client(), Bucket, [Key1, Key2], [])),
    ok.

list_objects(Config) ->
    Bucket = ?config(bucket, Config),
    Key1 = unique_key(),
    Key2 = unique_key(),
    ok = aws_s3_util:write(client(), Bucket, Key1, <<"data">>, []),
    ok = aws_s3_util:write(client(), Bucket, Key2, <<"data">>, []),
    ?assertEqual([Key1, Key2],
                 aws_s3_util:list_objects(client(), Bucket, <<"test-key">>, [])),
    ok.

create_delete_bucket(_Config) ->
    Bucket = unique_key(),
    ?assertEqual(ok, aws_s3_util:create_bucket(client(), Bucket, [])),
    ?assertEqual(ok, aws_s3_util:delete_bucket(client(), Bucket, [])),
    ok.

bucket_exists(Config) ->
    Bucket = ?config(bucket, Config),
    ?assert(aws_s3_util:bucket_exists(client(), Bucket, [])),
    ?assertNot(aws_s3_util:bucket_exists(client(), unique_key(), [])),
    ok.

%%--------------------------------------------------------------------
%% Helpers

%% Build a client pointing at the S3Mock container (port 9090). The host is
%% taken from the S3MOCK_HOST environment variable in CI, defaulting to
%% localhost for local runs.
client() ->
    aws_client:make_local_client(<<"AccessKeyID">>,
                                 <<"SecretAccessKey">>,
                                 <<"9090">>,
                                 list_to_binary(os:getenv("S3MOCK_HOST", "localhost"))).

%% Remove any keys left behind by a test case; skip the DeleteObjects call
%% when the bucket is already empty.
cleanup_keys(Bucket) ->
    case aws_s3_util:list_objects(client(), Bucket, <<"">>, []) of
        [] ->
            ok;
        Keys ->
            {ok, []} = aws_s3_util:delete_objects(client(), Bucket, Keys, [])
    end.

unique_key() ->
    <<"test-key-", (integer_to_binary(erlang:unique_integer([positive])))/binary>>.
