From 89177f15706f1124f0929238682816f5c15d02fc Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 23 Jan 2020 09:29:35 +0200 Subject: [PATCH 1/6] chore: re-enable interledger-ildcp --- Cargo.lock | 13 +++++++++++++ Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 717e47c9d..9582d571c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,6 +343,19 @@ name = "hex" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "interledger-ildcp" +version = "0.4.0" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "interledger-packet" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 39ff559db..6219b8f63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,7 @@ members = [ # "./crates/interledger-btp", # "./crates/interledger-ccp", # "./crates/interledger-http", - # "./crates/interledger-ildcp", + "./crates/interledger-ildcp", "./crates/interledger-packet", "./crates/interledger-router", "./crates/interledger-service", From 55703026f035e339ba6c04ee406c5af518ce0065 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 23 Jan 2020 09:30:47 +0200 Subject: [PATCH 2/6] feat(client): convert client to async/await --- Cargo.lock | 5 +++- crates/interledger-ildcp/Cargo.toml | 7 +++++- crates/interledger-ildcp/src/client.rs | 33 ++++++++++++-------------- 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9582d571c..87a822c15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -347,13 +347,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "interledger-ildcp" version = "0.4.0" dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "interledger-service 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/crates/interledger-ildcp/Cargo.toml b/crates/interledger-ildcp/Cargo.toml index 78e3409a2..bc2755674 100644 --- a/crates/interledger-ildcp/Cargo.toml +++ b/crates/interledger-ildcp/Cargo.toml @@ -10,8 +10,13 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = 
"../interledger-service", version = "^0.4.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false } log = { version = "0.4.8", default-features = false } +async-trait = "0.1.22" + +[dev-dependencies] +tokio = { version = "0.2.6", features = ["macros","rt-core"]} +uuid = { version = "0.8.1", features = ["v4"] } diff --git a/crates/interledger-ildcp/src/client.rs b/crates/interledger-ildcp/src/client.rs index 06d1ef4c4..209cb3090 100644 --- a/crates/interledger-ildcp/src/client.rs +++ b/crates/interledger-ildcp/src/client.rs @@ -1,34 +1,31 @@ use super::packet::*; -use futures::Future; +use futures::future::TryFutureExt; use interledger_service::*; use log::{debug, error}; use std::convert::TryFrom; -/// Get the ILP address and asset details for a given account. -pub fn get_ildcp_info( - service: &mut S, - account: A, -) -> impl Future +/// Sends an ILDCP Request to the provided service from the provided account +/// and receives the account's ILP address and asset details +pub async fn get_ildcp_info(service: &mut S, account: A) -> Result where S: IncomingService, A: Account, { let prepare = IldcpRequest {}.to_prepare(); - service + let fulfill = service .handle_request(IncomingRequest { from: account, prepare, }) .map_err(|err| error!("Error getting ILDCP info: {:?}", err)) - .and_then(|fulfill| { - let response = - IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { - error!( - "Unable to parse ILDCP response from fulfill packet: {:?}", - err - ); - })?; - debug!("Got ILDCP response: {:?}", response); - Ok(response) - }) + .await?; + + let response = IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { + error!( + "Unable to parse ILDCP response from fulfill packet: {:?}", + err + ); + })?; + debug!("Got ILDCP response: {:?}", response); + Ok(response) } From bc3c6e97b4c4033e03270d840d24074362552564 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Thu, 23 Jan 2020 09:31:06 +0200 Subject: [PATCH 3/6] docs(packet): enhance docs --- crates/interledger-ildcp/src/packet.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/interledger-ildcp/src/packet.rs b/crates/interledger-ildcp/src/packet.rs index ea4f02b58..57b36d6cf 100644 --- a/crates/interledger-ildcp/src/packet.rs +++ b/crates/interledger-ildcp/src/packet.rs @@ -29,6 +29,9 @@ pub fn is_ildcp_request(prepare: &Prepare) -> bool { && prepare.destination() == *ILDCP_DESTINATION } +/// ILDCP Requests are sent to peers to receive an ILDCP Response +/// which contains details about how the peer has configured our account +/// (this is just a newtype to type-safe serialize to a Prepare packet) #[derive(Debug, Default)] pub struct IldcpRequest {} @@ -55,11 +58,16 @@ impl From for Prepare { } } +/// The response to an ILDCP Request. 
#[derive(Clone, PartialEq)]
 pub struct IldcpResponse {
+    /// Serialized buffer of the response
     buffer: Bytes,
+    /// The asset scale corresponding to the requested account
     asset_scale: u8,
+    /// The offset after which the asset code is stored in the buffer
     asset_code_offset: usize,
+    /// The ILP Address we have assigned to the requested account
     ilp_address: Address,
 }

From b11e8db2ade1adc6e03ff4f0559eba76581e60bc Mon Sep 17 00:00:00 2001
From: Georgios Konstantopoulos
Date: Thu, 23 Jan 2020 09:32:06 +0200
Subject: [PATCH 4/6] feat(server): make the service async

---
 crates/interledger-ildcp/src/server.rs | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/crates/interledger-ildcp/src/server.rs b/crates/interledger-ildcp/src/server.rs
index f08ba5dbd..273993457 100644
--- a/crates/interledger-ildcp/src/server.rs
+++ b/crates/interledger-ildcp/src/server.rs
@@ -1,6 +1,6 @@
 use super::packet::*;
 use super::Account;
-use futures::future::ok;
+use async_trait::async_trait;
 use interledger_packet::*;
 use interledger_service::*;
 use log::debug;
@@ -27,14 +27,13 @@ where
     }
 }

+#[async_trait]
 impl<I, A> IncomingService<A> for IldcpService<I>
 where
-    I: IncomingService<A>,
+    I: IncomingService<A> + Send,
     A: Account,
 {
-    type Future = BoxedIlpFuture;
-
-    fn handle_request(&mut self, request: IncomingRequest<A>) -> Self::Future {
+    async fn handle_request(&mut self, request: IncomingRequest<A>) -> IlpResult {
         if is_ildcp_request(&request.prepare) {
             let from = request.from.ilp_address();
             let builder = IldcpResponseBuilder {
@@ -44,10 +43,9 @@ where
             };
             debug!("Responding to query for ildcp info by account: {:?}", from);
             let response = builder.build();
-            let fulfill = Fulfill::from(response);
-            Box::new(ok(fulfill))
+            Ok(Fulfill::from(response))
         } else {
-            Box::new(self.next.handle_request(request))
+            self.next.handle_request(request).await
         }
     }
 }

From 1010c22eb88967e00185889f369fae5c39bf81bc Mon Sep 17 00:00:00 2001
From: Georgios Konstantopoulos
Date: Thu, 23 Jan 2020 09:32:21 +0200
Subject: [PATCH 5/6] test(server): add tests

---
 crates/interledger-ildcp/src/server.rs | 63 ++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/crates/interledger-ildcp/src/server.rs b/crates/interledger-ildcp/src/server.rs
index 273993457..da2684eba 100644
--- a/crates/interledger-ildcp/src/server.rs
+++ b/crates/interledger-ildcp/src/server.rs
@@ -49,3 +49,66 @@ where
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::get_ildcp_info;
+    use lazy_static::lazy_static;
+    use std::str::FromStr;
+    use uuid::Uuid;
+
+    lazy_static!
{
+        pub static ref ALICE: Username = Username::from_str("alice").unwrap();
+        pub static ref EXAMPLE_ADDRESS: Address = Address::from_str("example.alice").unwrap();
+    }
+
+    #[derive(Clone, Debug, Copy)]
+    struct TestAccount;
+
+    impl Account for TestAccount {
+        fn id(&self) -> Uuid {
+            Uuid::new_v4()
+        }
+
+        fn username(&self) -> &Username {
+            &ALICE
+        }
+
+        fn asset_scale(&self) -> u8 {
+            9
+        }
+
+        fn asset_code(&self) -> &str {
+            "XYZ"
+        }
+
+        fn ilp_address(&self) -> &Address {
+            &EXAMPLE_ADDRESS
+        }
+    }
+
+    #[tokio::test]
+    async fn handles_request() {
+        let from = TestAccount;
+        let prepare = IldcpRequest {}.to_prepare();
+        let req = IncomingRequest { from, prepare };
+        let mut service = IldcpService::new(incoming_service_fn(|_| {
+            Err(RejectBuilder {
+                code: ErrorCode::F02_UNREACHABLE,
+                message: b"No other incoming handler!",
+                data: &[],
+                triggered_by: None,
+            }
+            .build())
+        }));
+
+        let result = service.handle_request(req).await.unwrap();
+        assert_eq!(result.data().len(), 19);
+
+        let ildcp_info = get_ildcp_info(&mut service, from).await.unwrap();
+        assert_eq!(ildcp_info.ilp_address(), EXAMPLE_ADDRESS.clone());
+        assert_eq!(ildcp_info.asset_code(), b"XYZ");
+        assert_eq!(ildcp_info.asset_scale(), 9);
+    }
+}

From d25481cbd73ad97ab196035f0dbb41e0f8153f36 Mon Sep 17 00:00:00 2001
From: Georgios Konstantopoulos
Date: Wed, 29 Jan 2020 12:35:49 +0200
Subject: [PATCH 6/6] # Interledger CCP: Futures 0.3 Transition (#598)

* feat(ccp): convert store traits to async/await
* feat(ccp-server): make the ccp server async
* test(ccp-server): make tests async
* chore(routing-table): limit api visibility of table methods

# Interledger BTP: Futures 0.3 Transition (#599)

* feat(btp): update traits to be async
* refactor(btp/wrapped-ws): refactor WsWrap to a separate file

Ideally, we would want to get rid of it by doing a `StreamExt::map_ok` and
`SinkExt::with` to map both WebSocket return types to the same value.
We also use `filter_map` to get rid of any errors from the WebSocket.
The WsError type has been removed as a result of that.

* feat(btp/client): port to async/await
* feat(btp/server): move to async/await
* feat(btp/service): move service to async/await
* We refactored the service to be more readable. Basically, we split the
websocket into a Sink (write) and a Stream (read). We also create a
`tx`/`rx` pair per account. The rx receiver gets attached to the sink,
meaning any data sent by the `tx` sender will get forwarded to the sink,
which will forward it to the other end of the websocket. Unfortunately,
since we are unable to recombine the read and write halves of the socket,
we have to spawn them separately. This means that we have to remove the
hook which cancels the streams.
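
To make the data flow described above concrete, here is a minimal sketch of the split-and-forward pattern. It is our own illustration, not the code in this patch: the function name `spawn_account_pumps`, its bounds, and the message type are made up for the example.

```rust
use futures::{channel::mpsc, FutureExt, Sink, Stream, StreamExt};

/// Illustrative only: `W` stands in for the WebSocket connection.
fn spawn_account_pumps<W, Msg>(ws: W) -> mpsc::UnboundedSender<Msg>
where
    W: Stream<Item = Msg> + Sink<Msg> + Unpin + Send + 'static,
    Msg: Send + 'static,
{
    // Split the connection into a write half (Sink) and a read half (Stream).
    let (write_half, mut read_half) = ws.split();

    // Per-account channel: anything sent on `tx` is forwarded into the sink,
    // i.e. out over the websocket.
    let (tx, rx) = mpsc::unbounded::<Msg>();
    tokio::spawn(rx.map(Ok).forward(write_half).map(|_| ()));

    // The read half is polled by a separate task, since the two halves can no
    // longer be driven together once the forwarding future owns the sink.
    tokio::spawn(async move {
        while let Some(_incoming) = read_half.next().await {
            // hand the incoming message to the packet handler here
        }
    });

    tx
}
```

The actual service additionally keeps the per-account `tx` senders around and parses the incoming messages as BTP packets; the sketch only shows why the two halves end up being spawned as separate tasks.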
# Interledger HTTP: Futures 0.3 Transition (#600) * feat(http): Update HttpStore trait to futures 0.3 and deserialize_json method * feat(http): Update HTTP Errors and client * feat(http): Update HTTP Server * docs(http): extend http docs # Interledger Stream: Futures 0.3 Transition (#601) * feat(stream): Update Stream server * feat(stream): Update Stream client * docs(stream): extend stream docs * fix(stream): add extra limits to ensure all the pending request futures are thread safe # Interledger Settlement: Futures 0.3 Transition (#602) * feat(settlement/core): Upgrade types and idempotency * feat(settlement/core): Upgrade engines API Warp interface * feat(settlement/core): Upgrade Redis backend implementation * feat(settlement/api): Upgrade the message service * feat(settlement/api): Upgrade the settlement client * feat(settlement/api): Upgrade the Settlement API exposed by the node * chore(settlement): remove need to pass future wrapped in closure * docs(settlement): extend settlement docs # Interledger SPSP: Futures 0.3 Transition (#603) * feat(spsp): move to futures 0.3 and async/await * docs(spsp): extend spsp docs * fix(spsp): tighten trait bounds to account for stream changes # Interledger Service Util: Futures 0.3 Transition (#604) * feat(service-util): update validator service * feat(service-util): update rate limit service * feat(service-util): update max packet amount service * feat(service-util): update expiry shortener service * feat(service-util): update exchange rate service and providers * feat(service-util): update echo service * feat(service-util): update balance service # Interledger API: Futures 0.3 Transition (#605) * feat(api): update trait definitions and dependencies * feat(api): update http retry client * test(api): migrate test helpers * feat(api): update node-settings route * test(api): update node-settings route tests * feat(api): update accounts route * test(api): update accounts route tests * chore(api): add missing doc # Interledger Store: Futures 0.3 Transition (#606) * feat(store): Update redis reconnect * feat(store): Update base redis struct * feat(store): Update AccountStore trait * feat(store): Update StreamNotificationsStore trait * feat(store): Update BalanceStore trait * feat(store): Update BtpStore trait * feat(store): Update HttpStore trait * feat(store): Update NodeStore trait * feat(store): Update AddressStore trait * feat(store): Update RouteManagerStore trait * feat(store): Update RateLimitStore trait * feat(store): Update IdempotentStore trait * feat(store): Update SettlementStore trait * feat(store): Update LeftoversStore trait * feat(store): Update update_routes * test(store): convert all tests to tokio::test with async/await * feat(store): update secrecy/bytes/zeroize * docs(store): add more docs # ILP CLI: Futures 0.3 Transition (#607) * feat(ilp-cli): update CLI to async/await # ILP Node: Futures 0.3 Transition (#608) (#609) * test(ilp-node): migrate tests to futures 0.3 * feat(ilp-node): move metrics related files to feature-gated module * feat(ilp-node): remove deprecated insert account function * feat(ilp-node): make the node run on async/await * ci(ilp-node): disable some advisories and update README * fix(ilp-node): spawn prometheus filter # Service * feat(service): Box wrapper methods to avoid exponential type blowup --- .circleci/config.yml | 59 +- Cargo.lock | 2108 ++++++++++++++- Cargo.toml | 24 +- README.md | 16 +- crates/ilp-cli/Cargo.toml | 6 +- crates/ilp-cli/src/interpreter.rs | 5 +- crates/ilp-cli/src/main.rs | 36 +- 
crates/ilp-node/Cargo.toml | 41 +- crates/ilp-node/src/google_pubsub.rs | 186 -- .../src/instrumentation/google_pubsub.rs | 187 ++ .../ilp-node/src/instrumentation/metrics.rs | 82 + crates/ilp-node/src/instrumentation/mod.rs | 10 + .../src/instrumentation/prometheus.rs | 90 + .../src/{ => instrumentation}/trace.rs | 47 +- crates/ilp-node/src/lib.rs | 11 +- crates/ilp-node/src/main.rs | 24 +- crates/ilp-node/src/metrics.rs | 82 - crates/ilp-node/src/node.rs | 563 ++-- crates/ilp-node/src/redis_store.rs | 46 +- crates/ilp-node/tests/redis/btp.rs | 196 +- crates/ilp-node/tests/redis/exchange_rates.rs | 139 +- crates/ilp-node/tests/redis/prometheus.rs | 182 +- crates/ilp-node/tests/redis/redis_helpers.rs | 24 +- crates/ilp-node/tests/redis/redis_tests.rs | 6 +- crates/ilp-node/tests/redis/test_helpers.rs | 125 +- crates/ilp-node/tests/redis/three_nodes.rs | 361 +-- crates/interledger-api/Cargo.toml | 20 +- crates/interledger-api/src/http_retry.rs | 63 +- crates/interledger-api/src/lib.rs | 137 +- crates/interledger-api/src/routes/accounts.rs | 882 +++--- .../src/routes/node_settings.rs | 353 +-- .../src/routes/test_helpers.rs | 137 +- crates/interledger-btp/Cargo.toml | 27 +- crates/interledger-btp/src/client.rs | 128 +- crates/interledger-btp/src/lib.rs | 273 +- crates/interledger-btp/src/server.rs | 285 +- crates/interledger-btp/src/service.rs | 525 ++-- crates/interledger-btp/src/wrapped_ws.rs | 92 + crates/interledger-ccp/Cargo.toml | 6 +- crates/interledger-ccp/src/lib.rs | 29 +- crates/interledger-ccp/src/packet.rs | 22 +- crates/interledger-ccp/src/routing_table.rs | 30 +- crates/interledger-ccp/src/server.rs | 998 +++---- crates/interledger-ccp/src/test_helpers.rs | 68 +- crates/interledger-http/Cargo.toml | 14 +- crates/interledger-http/src/client.rs | 147 +- .../interledger-http/src/error/error_types.rs | 36 +- crates/interledger-http/src/error/mod.rs | 59 +- crates/interledger-http/src/lib.rs | 111 +- crates/interledger-http/src/server.rs | 200 +- crates/interledger-service-util/Cargo.toml | 15 +- .../src/balance_service.rs | 196 +- .../src/echo_service.rs | 88 +- .../src/exchange_rate_providers/coincap.rs | 78 +- .../exchange_rate_providers/cryptocompare.rs | 74 +- .../src/exchange_rate_providers/mod.rs | 2 + .../src/exchange_rates_service.rs | 166 +- .../src/expiry_shortener_service.rs | 32 +- crates/interledger-service-util/src/lib.rs | 10 + .../src/max_packet_amount_service.rs | 22 +- .../src/rate_limit_service.rs | 91 +- .../src/validator_service.rs | 176 +- crates/interledger-service/src/lib.rs | 238 +- crates/interledger-settlement/Cargo.toml | 17 +- .../interledger-settlement/src/api/client.rs | 78 +- .../src/api/message_service.rs | 174 +- crates/interledger-settlement/src/api/mod.rs | 5 +- .../src/api/node_api.rs | 641 ++--- .../src/api/test_helpers.rs | 183 +- .../src/core/backends_common/redis/mod.rs | 339 ++- .../backends_common/redis/test_helpers/mod.rs | 2 +- .../redis/test_helpers/redis_helpers.rs | 11 +- .../redis/test_helpers/store_helpers.rs | 23 +- .../src/core/engines_api.rs | 379 ++- .../src/core/idempotency.rs | 218 +- crates/interledger-settlement/src/core/mod.rs | 31 +- .../interledger-settlement/src/core/types.rs | 149 +- crates/interledger-settlement/src/lib.rs | 4 +- crates/interledger-spsp/Cargo.toml | 12 +- crates/interledger-spsp/src/client.rs | 72 +- crates/interledger-spsp/src/lib.rs | 7 + crates/interledger-spsp/src/server.rs | 41 +- crates/interledger-store/Cargo.toml | 17 +- crates/interledger-store/src/account.rs | 73 +- 
crates/interledger-store/src/crypto.rs | 12 +- crates/interledger-store/src/lib.rs | 3 + crates/interledger-store/src/redis/mod.rs | 2369 ++++++++--------- .../interledger-store/src/redis/reconnect.rs | 120 +- .../tests/redis/accounts_test.rs | 662 ++--- .../tests/redis/balances_test.rs | 441 ++- .../interledger-store/tests/redis/btp_test.rs | 118 +- .../tests/redis/http_test.rs | 127 +- .../tests/redis/rate_limiting_test.rs | 154 +- .../tests/redis/rates_test.rs | 41 +- .../tests/redis/redis_tests.rs | 82 +- .../tests/redis/routing_test.rs | 582 ++-- .../tests/redis/settlement_test.rs | 595 ++--- crates/interledger-stream/Cargo.toml | 6 +- crates/interledger-stream/src/client.rs | 295 +- crates/interledger-stream/src/congestion.rs | 15 + crates/interledger-stream/src/crypto.rs | 36 + crates/interledger-stream/src/error.rs | 1 + crates/interledger-stream/src/lib.rs | 51 +- crates/interledger-stream/src/packet.rs | 145 +- crates/interledger-stream/src/server.rs | 63 +- 105 files changed, 10428 insertions(+), 8452 deletions(-) delete mode 100644 crates/ilp-node/src/google_pubsub.rs create mode 100644 crates/ilp-node/src/instrumentation/google_pubsub.rs create mode 100644 crates/ilp-node/src/instrumentation/metrics.rs create mode 100644 crates/ilp-node/src/instrumentation/mod.rs create mode 100644 crates/ilp-node/src/instrumentation/prometheus.rs rename crates/ilp-node/src/{ => instrumentation}/trace.rs (82%) delete mode 100644 crates/ilp-node/src/metrics.rs create mode 100644 crates/interledger-btp/src/wrapped_ws.rs diff --git a/.circleci/config.yml b/.circleci/config.yml index 9a591e954..419583c3c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -44,7 +44,11 @@ jobs: cargo clippy --all-targets --all-features -- -D warnings - run: name: Audit Dependencies - command: cargo audit + # Disable: + # 1. lazy_static advisory: https://github.com/interledger-rs/interledger-rs/issues/588 + # 2. 
http/hyper advisory: outdated http due to yup-oauth2 3.1.1, tungstenite 0.9.2 + command: cargo audit --ignore RUSTSEC-2019-0033 --ignore RUSTSEC-2019-0034 --ignore RUSTSEC-2019-0031 + test-md: docker: - image: circleci/rust @@ -57,37 +61,34 @@ jobs: steps: - checkout - run: - name: Disabled - command: echo "temporarily disabled" - # - run: - # name: Install Dependencies - # command: | - # # install system dependeicies - # sudo apt-get update - # sudo apt-get install -y redis-server redis-tools lsof libssl-dev + name: Install Dependencies + command: | + # install system dependeicies + sudo apt-get update + sudo apt-get install -y redis-server redis-tools lsof libssl-dev - # # install nvm - # curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash - # export NVM_DIR="/home/circleci/.nvm" - # source $NVM_DIR/nvm.sh - # nvm install "v11.15.0" + # install nvm + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash + export NVM_DIR="/home/circleci/.nvm" + source $NVM_DIR/nvm.sh + nvm install "v11.15.0" - # # install yarn & components (ganache-cli ilp-settlement-xrp conventional-changelog-cli) - # curl -o- -L https://yarnpkg.com/install.sh | bash - # export PATH="/home/circleci/.yarn/bin:/home/circleci/.config/yarn/global/node_modules/.bin:$PATH" - # yarn global add ganache-cli ilp-settlement-xrp conventional-changelog-cli + # install yarn & components (ganache-cli ilp-settlement-xrp conventional-changelog-cli) + curl -o- -L https://yarnpkg.com/install.sh | bash + export PATH="/home/circleci/.yarn/bin:/home/circleci/.config/yarn/global/node_modules/.bin:$PATH" + yarn global add ganache-cli ilp-settlement-xrp conventional-changelog-cli - # # env - # echo 'export NVM_DIR="/home/circleci/.nvm"' >> ${BASH_ENV} - # echo 'source $NVM_DIR/nvm.sh' >> ${BASH_ENV} - # echo "export PATH=/home/circleci/.cargo/bin:$PATH" >> ${BASH_ENV} - # - run: - # name: Run run-md Test - # command: | - # scripts/run-md-test.sh '^.*$' 1 - # - store_artifacts: - # path: /tmp/run-md-test - # destination: run-md-test + # env + echo 'export NVM_DIR="/home/circleci/.nvm"' >> ${BASH_ENV} + echo 'source $NVM_DIR/nvm.sh' >> ${BASH_ENV} + echo "export PATH=/home/circleci/.cargo/bin:$PATH" >> ${BASH_ENV} + - run: + name: Run run-md Test + command: | + scripts/run-md-test.sh '^.*$' 1 + - store_artifacts: + path: /tmp/run-md-test + destination: run-md-test update-docker-images: docker: - image: circleci/rust diff --git a/Cargo.lock b/Cargo.lock index 87a822c15..ad16b4195 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,39 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+[[package]] +name = "aho-corasick" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "anyhow" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "approx" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "arc-swap" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "arrayvec" version = "0.4.12" @@ -8,14 +42,28 @@ dependencies = [ "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ascii" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "assert-json-diff" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "async-trait" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -32,6 +80,11 @@ name = "autocfg" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "base64" version = "0.10.1" @@ -40,11 +93,35 @@ dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bstr" version = "0.2.8" @@ -56,6 +133,16 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "bumpalo" +version = "3.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "byteorder" version = "1.3.2" @@ -67,6 +154,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -92,6 +180,11 @@ name = "cast" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "cc" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "cfg-if" version = "0.1.10" @@ -103,8 +196,9 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -125,6 +219,45 @@ dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "combine" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ascii 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "config" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation-sys" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "criterion" version = "0.3.0" @@ -137,7 +270,7 @@ dependencies = [ "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_os 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_xoshiro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -180,6 +313,19 @@ dependencies = [ "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crossbeam-epoch" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "crossbeam-queue" version = "0.1.2" @@ -197,6 +343,16 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crossbeam-utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "csv" version = "1.1.1" @@ -217,16 +373,100 @@ dependencies = [ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ct-logs" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "difference" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dtoa" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "either" version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "encoding_rs" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "failure" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "failure_derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 
(registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "fnv" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "fuchsia-zircon" version = "0.3.3" @@ -274,6 +514,15 @@ name = "futures-core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "futures-cpupool" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "futures-executor" version = "0.3.1" @@ -295,9 +544,19 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "futures-retry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -315,6 +574,7 @@ name = "futures-util" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -328,6 +588,14 @@ dependencies = [ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "getrandom" version = "0.1.13" @@ -339,63 +607,491 @@ dependencies = [ ] [[package]] -name = "hex" -version = "0.4.0" +name = "h2" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "interledger-ildcp" -version = "0.4.0" dependencies = [ - "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "interledger-packet 0.4.0", - "interledger-service 0.4.0", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "interledger-packet" -version = "0.4.0" +name = "h2" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_test 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "interledger-router" -version = "0.4.0" +name = "hdrhistogram" +version = "6.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "interledger-packet 0.4.0", - "interledger-service 0.4.0", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + 
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "interledger-service" -version = "0.4.0" +name = "headers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "headers-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "sha-1 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "heck" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hex" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "http" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "http" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "http-body" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "httparse" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "hyper" +version = "0.12.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.8 
(registry+https://github.com/rust-lang/crates.io-index)", + "h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper-rustls" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-rustls 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper-tls" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "idna" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ilp-cli" +version = "0.3.0" +dependencies = [ + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ilp-node" +version = "0.6.0" +dependencies = [ + "approx 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "config 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger 0.6.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics-core 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics-runtime 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + 
"redis 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-retry 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-futures 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-subscriber 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "yup-oauth2 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "im" +version = "12.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "sized-chunks 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "indexmap" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "input_buffer" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger" +version = "0.6.0" +dependencies = [ + "interledger-api 0.3.0", + "interledger-btp 0.4.0", + "interledger-ccp 0.3.0", + "interledger-http 0.4.0", + "interledger-ildcp 0.4.0", + "interledger-packet 0.4.0", + "interledger-router 0.4.0", + "interledger-service 0.4.0", + "interledger-service-util 0.4.0", + "interledger-settlement 0.3.0", + "interledger-spsp 0.4.0", + "interledger-store 0.4.0", + "interledger-stream 0.4.0", +] + +[[package]] +name = "interledger-api" +version = "0.3.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-retry 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-btp 0.4.0", + "interledger-ccp 0.3.0", + "interledger-http 0.4.0", + "interledger-ildcp 0.4.0", + "interledger-packet 0.4.0", + "interledger-router 0.4.0", + "interledger-service 0.4.0", + "interledger-service-util 0.4.0", + "interledger-settlement 0.3.0", + "interledger-spsp 0.4.0", + "interledger-stream 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "secrecy 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_path_to_error 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-btp" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "secrecy 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "stream-cancel 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tungstenite 0.10.0 (git+https://github.com/snapview/tokio-tungstenite)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-ccp" +version = "0.3.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-http" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "secrecy 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_path_to_error 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-ildcp" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-packet" +version = "0.4.0" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_test 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-router" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-service" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "interledger-packet 0.4.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -408,6 +1104,147 @@ dependencies = [ "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "interledger-service-util" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "interledger-settlement 0.3.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "secrecy 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-settlement" +version = "0.3.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-http 0.4.0", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mockito 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "redis 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 
(registry+https://github.com/rust-lang/crates.io-index)", + "tokio-retry 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "warp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-spsp" +version = "0.4.0" +dependencies = [ + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-packet 0.4.0", + "interledger-service 0.4.0", + "interledger-stream 0.4.0", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-store" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-api 0.3.0", + "interledger-btp 0.4.0", + "interledger-ccp 0.3.0", + "interledger-http 0.4.0", + "interledger-packet 0.4.0", + "interledger-router 0.4.0", + "interledger-service 0.4.0", + "interledger-service-util 0.4.0", + "interledger-settlement 0.3.0", + "interledger-stream 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "os_type 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "redis 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "secrecy 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "interledger-stream" +version = "0.4.0" +dependencies = [ + "async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 
1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "interledger-ildcp 0.4.0", + "interledger-packet 0.4.0", + "interledger-router 0.4.0", + "interledger-service 0.4.0", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "iovec" version = "0.1.4" @@ -429,6 +1266,14 @@ name = "itoa" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "js-sys" +version = "0.3.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -448,6 +1293,11 @@ name = "libc" version = "0.2.65" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "linked-hash-map" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "lock_api" version = "0.3.3" @@ -456,6 +1306,14 @@ dependencies = [ "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "log" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "log" version = "0.4.8" @@ -464,6 +1322,19 @@ dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "maybe-uninit" version = "2.0.0" @@ -485,6 +1356,69 @@ dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "metrics" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "metrics-core 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "metrics-core" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "metrics-observer-prometheus" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "hdrhistogram 6.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics-core 0.5.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "metrics-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "metrics-runtime" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arc-swap 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "im 12.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics-core 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics-observer-prometheus 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quanta 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "metrics-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "mime_guess" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "mio" version = "0.6.21" @@ -524,6 +1458,39 @@ dependencies = [ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "mockito" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "assert-json-diff 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "native-tls" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", + "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + 
"security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "net2" version = "0.2.33" @@ -539,21 +1506,40 @@ name = "nodrop" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "nom" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-bigint" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "num-integer" -version = "0.1.41" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-traits" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -564,6 +1550,57 @@ dependencies = [ "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "openssl" +version = "0.10.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "openssl-sys" +version = "0.9.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "os_type" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "owning_ref" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parking_lot" version = "0.9.0" @@ -588,6 +1625,16 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "percent-encoding" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pin-project" version = "0.4.7" @@ -601,9 +1648,9 @@ name = "pin-project-internal" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -616,6 +1663,11 @@ name = "pin-utils" version = "0.1.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pkg-config" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -626,9 +1678,9 @@ name = "proc-macro-hack" version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -637,11 +1689,20 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "proc-macro2" -version = "1.0.6" +name = "proc-macro2" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quanta" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -654,7 +1715,19 @@ name = "quote" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -678,6 +1751,19 @@ dependencies = [ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rand_core" version = "0.5.1" @@ -733,6 +1819,33 @@ dependencies = [ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "redis" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "combine 3.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "redox_syscall" version = "0.1.56" @@ -743,7 +1856,10 @@ name = "regex" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -752,6 +1868,8 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -759,6 +1877,63 @@ name = "regex-syntax" version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "remove_dir_all" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "reqwest" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "encoding_rs 0.8.22 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-tls 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ring" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustc_version" version = "0.2.3" @@ -767,6 +1942,18 @@ dependencies = [ "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rustls" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ryu" version = "1.0.2" @@ -780,11 +1967,72 @@ dependencies = [ "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "schannel" +version = "0.1.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "scopeguard" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "secrecy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "secrecy" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "security-framework" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "security-framework-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "semver" version = "0.9.0" @@ -811,9 +2059,9 @@ name = "serde_derive" version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -826,6 +2074,14 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "serde_test" version = "1.0.102" @@ -834,6 +2090,41 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "serde_urlencoded" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 
(registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sha-1" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "sized-chunks" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "slab" version = "0.4.2" @@ -852,16 +2143,74 @@ name = "smallvec" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "sourcefile" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "stable_deref_trait" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "stream-cancel" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "string" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "syn" -version = "1.0.7" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synstructure" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tempfile" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 
0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -870,6 +2219,42 @@ dependencies = [ "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "thiserror" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "thiserror-impl 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "time" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tinytemplate" version = "1.0.2" @@ -908,10 +2293,30 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-macros 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-buf" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-codec" version = "0.1.1" @@ -966,7 +2371,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -984,7 +2389,30 @@ dependencies = [ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.12 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-retry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-rustls" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1036,6 +2464,29 @@ dependencies = [ "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.10.0" +source = "git+https://github.com/snapview/tokio-tungstenite#308d9680c0e59dd1e8651659a775c05df937934e" +dependencies = [ + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-udp" version = "0.1.5" @@ -1067,12 +2518,39 @@ dependencies = [ "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-util" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "toml" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tower-service" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "tracing" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" 
dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "tracing-attributes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tracing-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1083,7 +2561,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1106,6 +2584,61 @@ dependencies = [ "tracing 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tracing-log" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tracing-subscriber" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matchers 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing-log 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "try-lock" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "tungstenite" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "input_buffer 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "sha-1 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "utf-8 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "typenum" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicase" version = "2.6.0" @@ -1114,6 +2647,14 @@ dependencies = [ "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "unicode-normalization" version = "0.1.12" @@ -1122,6 +2663,11 @@ dependencies = [ "smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicode-width" version = "0.1.6" @@ -1132,19 +2678,84 @@ name = "unicode-xid" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "untrusted" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "url" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "url" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "urlencoding" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "utf-8" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "utf8-ranges" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "uuid" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "vcpkg" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "version_check" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "version_check" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "walkdir" version = "2.2.9" @@ -1155,11 +2766,168 @@ dependencies = [ "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "want" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + 
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "warp" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "headers 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "urlencoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "wasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "wasm-bindgen" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bumpalo 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro-support 
0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "wasm-bindgen-webidl" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", + "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "web-sys" +version = "0.3.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "webpki" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "webpki-roots" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "weedle" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "winapi" version = "0.2.8" @@ -1197,6 +2965,14 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -1206,108 +2982,258 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "yaml-rust" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "linked-hash-map 0.5.2 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "yup-oauth2" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-rustls 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "zeroize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [metadata] +"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" +"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +"checksum anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" +"checksum approx 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" +"checksum arc-swap 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "bc4662175ead9cd84451d5c35070517777949a2ed84551764129cedb88384841" "checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +"checksum ascii 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" +"checksum assert-json-diff 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9881d306dee755eccf052d652b774a6b2861e86b4772f555262130e58e4f81d2" "checksum async-trait 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "c8df72488e87761e772f14ae0c2480396810e51b2c2ade912f97f0f7e5b95e3c" "checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" +"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +"checksum base64 
0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +"checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" "checksum bstr 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8d6c2c5b58ab920a4f5aeaaca34b4488074e8cc7596af94e6f8c6ff247c60245" +"checksum bumpalo 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5fb8038c1ddc0a5f73787b130f4cc75151e96ed33e417fde765eb5a81e3532f4" +"checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" "checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" "checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" "checksum bytes 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "10004c15deb332055f7a4a208190aed362cf9a7c2f6ab70a305fba50e1105f38" "checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427" +"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" "checksum chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e8493056968583b0193c1bb04d6f7684586f3726992d6c573261941a895dbd68" "checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +"checksum combine 3.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" +"checksum config 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f9107d78ed62b3fa5a86e7d18e647abed48cfd8f8fab6c72f4cdb982d196f7e6" +"checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d" +"checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" "checksum criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "938703e165481c8d612ea3479ac8342e5615185db37765162e762ec3523e2fc6" "checksum criterion-plot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eccdc6ce8bbe352ca89025bee672aa6d24f4eb8c53e3a8b5d1bc58011da072a2" "checksum crossbeam-deque 0.7.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71" "checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" +"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" "checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" "checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" "checksum csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37519ccdfd73a75821cac9319d4fce15a81b9fcf75f951df5b9988aa3a0af87d" "checksum csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" +"checksum ct-logs 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113" +"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +"checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +"checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" +"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" +"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" +"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" "checksum fuchsia-zircon-sys 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" "checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" "checksum futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6f16056ecbb57525ff698bb955162d0cd03bee84e6241c27ff75c08d8ca5987" "checksum futures-channel 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fcae98ca17d102fd8a3603727b9259fcf7fa4239b603d2142926189bc8999b86" "checksum futures-core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "79564c427afefab1dfb3298535b21eda083ef7935b4f0ecbfcb121f0aec10866" +"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" "checksum futures-executor 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1e274736563f686a837a0568b478bdabfeaec2dca794b5649b04e2fe1627c231" "checksum futures-io 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e676577d229e70952ab25f3945795ba5b16d63ca794ca9d2c860e5595d20b5ff" "checksum futures-macro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "52e7c56c15537adb4f76d0b7a76ad131cb4d2f4f32d3b0bcabcbe1c7c5e87764" +"checksum futures-retry 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc9a95ec273db7b9d07559e25f9cd75074fee2f437f1e502b0c3b610d129d554" "checksum futures-sink 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "171be33efae63c2d59e6dbba34186fe0d6394fb378069a76dfd80fdcffd43c16" "checksum futures-task 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bae52d6b29cf440e298856fec3965ee6fa71b06aa7495178615953fd669e5f9" "checksum futures-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0d66274fb76985d3c62c886d1da7ac4c0903a8c9f754e8fe0f35a6a6cc39e76" +"checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" "checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407" +"checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +"checksum h2 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" +"checksum hdrhistogram 6.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08d331ebcdbca4acbefe5da8c3299b2e246f198a8294cc5163354e743398b89d" +"checksum headers 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c9836ffd533e1fb207cfdb2e357079addbd17ef5c68eea5afe2eece40555b905" +"checksum headers-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" "checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" +"checksum http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" +"checksum http 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" +"checksum http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +"checksum http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +"checksum hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)" = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" +"checksum hyper 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8bf49cfb32edee45d890537d9057d1b02ed55f53b7b6a30bae83a38c9231749e" +"checksum hyper-rustls 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "719d85c7df4a7f309a77d145340a063ea929dcb2e025bae46a80345cffec2952" +"checksum hyper-tls 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" +"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +"checksum im 12.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "de38d1511a0ce7677538acb1e31b5df605147c458e061b2cdb89858afb1cd182" +"checksum indexmap 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b54058f0a6ff80b6803da8faf8997cde53872b38f4023728f6830b06cd3c0dc" +"checksum input_buffer 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8e1b822cc844905551931d6f81608ed5f50a79c1078a4e2b4d42dbc7c1eedfbf" "checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" "checksum itertools 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "87fa75c9dea7b07be3138c49abbb83fd4bea199b5cdc76f9804458edc5da0d6e" "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" +"checksum js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)" = "1a31a0627fdf1f6a39ec0dd577e101440b7db22672c0901fe00a9a6fbb5c24e8" +"checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" "checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = 
"14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum matchers 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" "checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" "checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" "checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" +"checksum metrics 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "51b70227ece8711a1aa2f99655efd795d0cff297a5b9fe39645a93aacf6ad39d" +"checksum metrics-core 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c064b3a1ff41f4bf6c91185c8a0caeccf8a8a27e9d0f92cc54cf3dbec812f48" +"checksum metrics-observer-prometheus 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4f9bb94f40e189c87cf70ef1c78815b949ab9d28fe76ebb81f15f79bd19a33d6" +"checksum metrics-runtime 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "15ef9de8e4a0dd82d38f8588ef40c11db7e12b75c45945fd3ef6993a708f7ced" +"checksum metrics-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d11f8090a8886339f9468a04eeea0711e4cf27538b134014664308041307a1c5" +"checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +"checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +"checksum mockito 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aee38c301104cc75a6628a4360be706fbdf84290c15a120b7e54eca5881c3450" +"checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" "checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" -"checksum num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09" -"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32" +"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" +"checksum num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f6f115de20ad793e857f76da2563ff4a09fbcfd6fe93cca0c5d996ab5f3ee38d" +"checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" "checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273" +"checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +"checksum openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3cc5799d98e1088141b8e01ff760112bbd9f19d850c124500566ca6901a585" +"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +"checksum openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)" = "465d16ae7fc0e313318f7de5cecf57b2fbe7511fd213978b457e1c96ff46736f" +"checksum os_type 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7edc011af0ae98b7f88cf7e4a83b70a54a75d2b8cb013d6efd02e5956207e9eb" +"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" "checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" "checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" "checksum pin-project 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "75fca1c4ff21f60ca2d37b80d72b63dab823a9d19d3cda3a81d18bc03f0ba8c5" "checksum pin-project-internal 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6544cd4e4ecace61075a6ec78074beeef98d58aa9a3d07d053d993b2946a90d6" "checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" "checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" +"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" "checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" "checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" -"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" +"checksum proc-macro2 1.0.8 
(registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" +"checksum quanta 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f7a1905379198075914bc93d32a5465c40474f90a078bb13439cb00c547bcc" "checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" "checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" "checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" "checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" "checksum rand_os 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a788ae3edb696cfcba1c19bfd388cc4b8c21f8a408432b199c072825084da58a" "checksum rand_xoshiro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e18c91676f670f6f0312764c759405f13afb98d5d73819840cf72a518487bff" "checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123" "checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b" +"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +"checksum redis 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb1fe3fc011cde97315f370bc88e4db3c23b08709a04915921e02b1d363b20" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" "checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" +"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +"checksum reqwest 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0e798e19e258bf6c30a304622e3e9ac820e483b06a1857a026e1f109b113fe4" +"checksum ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"6747f8da1f2b1fabbee1aaa4eb8a11abf9adef0bf58a41cee45db5d59cecdfac" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +"checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" "checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" "checksum same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "585e8ddcedc187886a30fa705c47985c3fa88d06624095856b36ca0b82ff4421" +"checksum schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" +"checksum scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" "checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +"checksum secrecy 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2309a011083016deb67a984e8edb0845e42b4c6aaadae7658ebdcb47b91fdbc" +"checksum secrecy 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +"checksum security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8ef2429d7cefe5fd28bd1d2ed41c944547d4ff84776f5935b456da44593a16df" +"checksum security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e31493fc37615debb8c5090a7aeb4a9730bc61e77ab10b9af59f1a202284f895" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" "checksum serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4b39bd9b0b087684013a792c59e3e07a46a01d2322518d8a1104641a0b1be0" "checksum serde_derive 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)" = "ca13fc1a832f793322228923fbb3aba9f3f44444898f835d31ad1b74fa0a2bf8" "checksum serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)" = "2f72eb2a68a7dc3f9a691bfda9305a1c017a6215e5a4545c258500d2099a37c2" +"checksum serde_path_to_error 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "359b895005d818163c78a24d272cc98567cce80c2461cf73f513da1d296c0b62" "checksum serde_test 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)" = "00d9d9443b1a25de2526ad21a2efc89267df5387c36035fe3902fbda8a79d83c" +"checksum serde_urlencoded 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +"checksum sha-1 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +"checksum sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +"checksum sized-chunks 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"9d3e7f23bad2d6694e0f46f5e470ec27eb07b8f3e8b309a4b0dc17501928b9f2" "checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" "checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" "checksum smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44e59e0c9fa00817912ae6e4e6e3c4fe04455e75699d06eedc7d85917ed8e8f4" -"checksum syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "0e7bedb3320d0f3035594b0b723c8a28d7d336a3eda3881db79e61d676fb644c" +"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" +"checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" +"checksum stream-cancel 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "65851535511fa506e51dfa6f718f91ad2ea91f61a11392a093e0341f9730ebd5" +"checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" +"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" +"checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" "checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +"checksum thiserror 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6f357d1814b33bc2dc221243f8424104bfe72dbe911d5b71b3816a2dff1c977e" +"checksum thiserror-impl 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2e25d25307eb8436894f727aba8f65d07adf02e5b35a13cebed48bd282bfef" +"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" "checksum tinytemplate 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4574b75faccaacddb9b284faecdf0b544b80b6b294f3d062d325c5726a209c20" "checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" "checksum tokio 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c1fc73332507b971a5010664991a441b5ee0de92017f5a0e8b00fd684573045b" +"checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" "checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" "checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" "checksum tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ca6df436c42b0c3330a82d855d2ef017cd793090ad550a6bc2184f4b933532ab" @@ -1315,28 +3241,70 @@ dependencies = [ "checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" "checksum tokio-macros 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "50a61f268a3db2acee8dcab514efc813dc6dbe8a00e86076f935f94304b59a7a" "checksum tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6732fe6b53c8d11178dcb77ac6d9682af27fc6d4cb87789449152e5377377146" +"checksum tokio-retry 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c03755b956458582182941061def32b8123a26c98b08fc6ddcf49ae89d18f33" +"checksum tokio-rustls 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2d7cf08f990090abd6c6a73cab46fed62f85e8aef8b99e4b918a9f4a637f0676" "checksum tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d06554cce1ae4a50f42fba8023918afa931413aded705b560e29600ccf7c6d76" "checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" "checksum tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c32ffea4827978e9aa392d2f743d973c1dfa3730a2ed3f22ce1e6984da848c" "checksum tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1739638e364e558128461fc1ad84d997702c8e31c2e6b18fb99842268199e827" +"checksum tokio-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7bde02a3a5291395f59b06ec6945a3077602fac2b07eeeaf0dee2122f3619828" +"checksum tokio-tungstenite 0.10.0 (git+https://github.com/snapview/tokio-tungstenite)" = "" "checksum tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f02298505547f73e60f568359ef0d016d5acd6e830ab9bc7c4a5b3403440121b" "checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" +"checksum tokio-util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" +"checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +"checksum tower-service 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" "checksum tracing 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1e213bd24252abeb86a0b7060e02df677d367ce6cb772cef17e9214b8390a8d3" "checksum tracing-attributes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04cfd395def5a60236e187e1ff905cb55668a59f29928dec05e6e1b1fd2ac1f3" "checksum tracing-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "13a46f11e372b8bd4b4398ea54353412fdd7fd42a8370c7e543e218cf7661978" "checksum tracing-futures 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "33848db47a7c848ab48b66aab3293cb9c61ea879a3586ecfcd17302fcea0baf1" +"checksum tracing-log 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +"checksum tracing-subscriber 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = 
"192ca16595cdd0661ce319e8eede9c975f227cdaabc4faaefdc256f43d852e45" +"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +"checksum tungstenite 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8a0c2bd5aeb7dcd2bb32e472c8872759308495e5eccc942e929a513cd8d36110" +"checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" "checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" "checksum unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7007dbd421b92cc6e28410fe7362e2e0a2503394908f417b68ec8d1c364c4e20" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +"checksum untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" +"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" +"checksum urlencoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3df3561629a8bb4c57e5a2e4c43348d9e29c7c29d9b1c4c1f47166deca8f37ed" +"checksum utf-8 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)" = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" +"checksum utf8-ranges 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ae116fef2b7fea257ed6440d3cfcff7f190865f170cdad00bb6465bf18ecba" "checksum uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" +"checksum vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" +"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" "checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e" +"checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" +"checksum want 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +"checksum warp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b11768dcc95dbbc7db573192cda35cdbbe59793f8409a4e11b87141a0930d6ed" "checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" +"checksum wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" +"checksum wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" +"checksum wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8bbdd49e3e28b40dec6a9ba8d17798245ce32b019513a845369c641b275135d9" +"checksum wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" +"checksum wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" +"checksum wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" +"checksum wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" +"checksum web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" +"checksum webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7e664e770ac0110e2384769bcc59ed19e329d81f555916a6e072714957b81b4" +"checksum webpki-roots 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" +"checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" "checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +"checksum yaml-rust 0.4.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d" +"checksum yup-oauth2 3.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "687c1c52bf66691f1e7426e7520aecec25bf659835179095280a4acfcb24f63f" +"checksum zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" diff --git a/Cargo.toml b/Cargo.toml index 6219b8f63..316310f9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,20 +1,20 @@ [workspace] members = [ - # "./crates/ilp-cli", - # "./crates/ilp-node", - # "./crates/interledger", - # "./crates/interledger-api", - # "./crates/interledger-btp", - # "./crates/interledger-ccp", - # "./crates/interledger-http", + "./crates/ilp-cli", + "./crates/ilp-node", + "./crates/interledger", + "./crates/interledger-api", + "./crates/interledger-btp", + "./crates/interledger-ccp", + "./crates/interledger-http", "./crates/interledger-ildcp", "./crates/interledger-packet", "./crates/interledger-router", "./crates/interledger-service", - # "./crates/interledger-service-util", - # "./crates/interledger-settlement", - # "./crates/interledger-spsp", - # "./crates/interledger-store", - # "./crates/interledger-stream", + "./crates/interledger-service-util", + "./crates/interledger-settlement", + "./crates/interledger-spsp", + "./crates/interledger-store", + "./crates/interledger-stream", ] diff --git a/README.md b/README.md index 1bca9edef..fd54c5d42 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,16 @@ [![crates.io](https://img.shields.io/crates/v/interledger.svg)](https://crates.io/crates/interledger) [![Interledger.rs Documentation](https://docs.rs/interledger/badge.svg)](https://docs.rs/interledger) [![CircleCI](https://circleci.com/gh/interledger-rs/interledger-rs.svg?style=shield)](https://circleci.com/gh/interledger-rs/interledger-rs) -![Rust Version](https://img.shields.io/badge/rust-stable-Success) -[![Docker Image](https://img.shields.io/docker/pulls/interledgerrs/node.svg?maxAge=2592000)](https://hub.docker.com/r/interledgerrs/node/) +![rustc](https://img.shields.io/badge/rustc-1.39+-red.svg) +![Rust](https://img.shields.io/badge/rust-stable-Success) +[![Docker Image](https://img.shields.io/docker/pulls/interledgerrs/ilp-node.svg?maxAge=2592000)](https://hub.docker.com/r/interledgerrs/ilp-node/) + +## Requirements + +All crates require Rust 2018 edition and are tested on the following channels: + +- `1.39.0` (minimum supported) +- `stable` ## Connecting to the Testnet @@ -34,7 +42,7 @@ To run the Interledger.rs components by themselves (rather than the `testnet-bun #### Install ```bash # -docker pull interledgerrs/node +docker pull interledgerrs/ilp-node docker pull interledgerrs/ilp-cli docker pull interledgerrs/ilp-settlement-ethereum ``` @@ -43,7 +51,7 @@ docker pull interledgerrs/ilp-settlement-ethereum ```bash # # This runs the sender / receiver / router bundle -docker run -it interledgerrs/node +docker run -it interledgerrs/ilp-node # This is a simple CLI for interacting with the node's HTTP API docker run -it --rm interledgerrs/ilp-cli diff --git a/crates/ilp-cli/Cargo.toml b/crates/ilp-cli/Cargo.toml index 9ad32951c..d8b3e99a0 100644 --- a/crates/ilp-cli/Cargo.toml +++ b/crates/ilp-cli/Cargo.toml @@ -10,9 +10,9 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] clap = { version = "2.33.0", default-features = false } thiserror = { version = "1.0.4", default-features = false } -http = { version = 
"0.1.18", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } -serde = { version = "1.0.101", default-features = false } +http = { version = "0.2", default-features = false } +reqwest = { version = "0.10.1", default-features = false, features = ["default-tls", "blocking", "json"] } +serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } tungstenite = { version = "0.9.1", default-features = false, features = ["tls"] } url = { version = "2.1.0", default-features = false } diff --git a/crates/ilp-cli/src/interpreter.rs b/crates/ilp-cli/src/interpreter.rs index d49b9a2da..6d878df66 100644 --- a/crates/ilp-cli/src/interpreter.rs +++ b/crates/ilp-cli/src/interpreter.rs @@ -1,6 +1,9 @@ use clap::ArgMatches; use http; -use reqwest::{self, Client, Response}; +use reqwest::{ + self, + blocking::{Client, Response}, +}; use std::{borrow::Cow, collections::HashMap}; use tungstenite::{connect, handshake::client::Request}; use url::Url; diff --git a/crates/ilp-cli/src/main.rs b/crates/ilp-cli/src/main.rs index b88e1268d..2b971903d 100644 --- a/crates/ilp-cli/src/main.rs +++ b/crates/ilp-cli/src/main.rs @@ -23,26 +23,28 @@ pub fn main() { eprintln!("ilp-cli error: {}", e); exit(1); } - Ok(mut response) => match response.text() { - Err(e) => { - eprintln!("ilp-cli error: Failed to parse HTTP response: {}", e); - exit(1); - } - Ok(body) => { - if response.status().is_success() { - if !matches.is_present("quiet") { - println!("{}", body); - } - } else { - eprintln!( - "ilp-cli error: Unexpected response from server: {}: {}", - response.status(), - body, - ); + Ok(response) => { + let status = response.status(); + match response.text() { + Err(e) => { + eprintln!("ilp-cli error: Failed to parse HTTP response: {}", e); exit(1); } + Ok(body) => { + if status.is_success() { + if !matches.is_present("quiet") { + println!("{}", body); + } + } else { + eprintln!( + "ilp-cli error: Unexpected response from server: {}: {}", + status, body, + ); + exit(1); + } + } } - }, + } } } diff --git a/crates/ilp-node/Cargo.toml b/crates/ilp-node/Cargo.toml index 695170d77..b7fa07280 100644 --- a/crates/ilp-node/Cargo.toml +++ b/crates/ilp-node/Cargo.toml @@ -11,10 +11,19 @@ default-run = "ilp-node" [features] default = ["balance-tracking", "redis"] balance-tracking = [] +redis = ["redis_crate", "interledger/redis"] + # This is an experimental feature that enables submitting packet # records to Google Cloud PubSub. This may be removed in the future. 
google-pubsub = ["base64", "chrono", "parking_lot", "reqwest", "serde_json", "yup-oauth2"] -redis = ["redis_crate", "interledger/redis"] +# This enables monitoring and tracing related features +monitoring = [ + "metrics", + "metrics-core", + "metrics-runtime", + "tracing-futures", + "tracing-subscriber", +] [[test]] name = "redis_tests" @@ -24,26 +33,21 @@ required-features = ["redis"] [dependencies] bytes = { version = "0.4.12", default-features = false } +bytes05 = { package = "bytes", version = "0.5", default-features = false } clap = { version = "2.33.0", default-features = false } config = { version = "0.9.3", default-features = false, features = ["json", "toml", "yaml"] } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false, features = ["compat"] } hex = { version = "0.4.0", default-features = false } interledger = { path = "../interledger", version = "^0.6.0", default-features = false, features = ["node"] } lazy_static = { version = "1.4.0", default-features = false } -metrics = { version = "0.12.0", default-features = false, features = ["std"] } -metrics-core = { version = "0.5.1", default-features = false } -metrics-runtime = { version = "0.12.0", default-features = false, features = ["metrics-observer-prometheus"] } num-bigint = { version = "0.2.3", default-features = false, features = ["std"] } -redis_crate = { package = "redis", version = "0.13.0", default-features = false, features = ["executor"], optional = true } +redis_crate = { package = "redis", version = "0.15.1", optional = true, features = ["tokio-rt-core"] } ring = { version = "0.16.9", default-features = false } serde = { version = "1.0.101", default-features = false } -tokio = { version = "0.1.22", default-features = false } -tracing = { version = "0.1.9", default-features = true, features = ["log"] } -tracing-futures = { version = "0.1.1", default-features = true, features = ["tokio", "futures-01"] } -tracing-subscriber = { version = "0.1.6", default-features = true, features = ["tracing-log"] } +tokio = { version = "0.2.8", features = ["rt-core", "macros", "time"] } url = { version = "2.1.0", default-features = false } libc = { version = "0.2.62", default-features = false } -warp = { version = "0.1.20", default-features = false, features = ["websocket"] } +warp = { version = "0.2", default-features = false, features = ["websocket"] } secrecy = { version = "0.5.1", default-features = false, features = ["alloc", "serde"] } uuid = { version = "0.8.1", default-features = false} @@ -51,19 +55,28 @@ uuid = { version = "0.8.1", default-features = false} base64 = { version = "0.10.1", default-features = false, optional = true } chrono = { version = "0.4.9", default-features = false, features = [], optional = true} parking_lot = { version = "0.9.0", default-features = false, optional = true } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"], optional = true } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls", "json"], optional = true } serde_json = { version = "1.0.41", default-features = false, optional = true } yup-oauth2 = { version = "3.1.1", default-features = false, optional = true } + +# Tracing / metrics / prometheus for instrumentation +tracing = { version = "0.1.12", default-features = true, features = ["log"] } +tracing-futures = { version = "0.2", default-features = true, features = ["tokio", "futures-03"], optional = true } +tracing-subscriber = { version = "0.1.6", 
default-features = true, features = ["tracing-log"], optional = true } +metrics = { version = "0.12.0", default-features = false, features = ["std"], optional = true } +metrics-core = { version = "0.5.1", default-features = false, optional = true } +metrics-runtime = { version = "0.12.0", default-features = false, features = ["metrics-observer-prometheus"], optional = true } + [dev-dependencies] approx = { version = "0.3.2", default-features = false } base64 = { version = "0.10.1", default-features = false } net2 = { version = "0.2.33", default-features = false } rand = { version = "0.7.2", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls", "json"] } serde_json = { version = "1.0.41", default-features = false } tokio-retry = { version = "0.2.0", default-features = false } [badges] circle-ci = { repository = "interledger-rs/interledger-rs" } -codecov = { repository = "interledger-rs/interledger-rs" } +codecov = { repository = "interledger-rs/interledger-rs" } \ No newline at end of file diff --git a/crates/ilp-node/src/google_pubsub.rs b/crates/ilp-node/src/google_pubsub.rs deleted file mode 100644 index 3f667a49e..000000000 --- a/crates/ilp-node/src/google_pubsub.rs +++ /dev/null @@ -1,186 +0,0 @@ -use base64; -use chrono::Utc; -use futures::{ - future::{ok, Either}, - Future, -}; -use interledger::{ - packet::Address, - service::{Account, BoxedIlpFuture, OutgoingRequest, OutgoingService, Username}, -}; -use parking_lot::Mutex; -use reqwest::r#async::Client; -use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, sync::Arc}; -use tokio::spawn; -use tracing::{error, info}; -use yup_oauth2::{service_account_key_from_file, GetToken, ServiceAccountAccess}; - -static TOKEN_SCOPES: Option<&str> = Some("https://www.googleapis.com/auth/pubsub"); - -/// Configuration for the Google PubSub packet publisher -#[derive(Deserialize, Clone, Debug)] -pub struct PubsubConfig { - /// Path to the Service Account Key JSON file. - /// You can obtain this file by logging into [console.cloud.google.com](https://console.cloud.google.com/) - service_account_credentials: String, - project_id: String, - topic: String, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -struct PubsubMessage { - message_id: Option, - data: Option, - attributes: Option>, - publish_time: Option, -} - -#[derive(Serialize)] -struct PubsubRequest { - messages: Vec, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -struct PacketRecord { - prev_hop_account: Username, - prev_hop_asset_code: String, - prev_hop_asset_scale: u8, - prev_hop_amount: u64, - next_hop_account: Username, - next_hop_asset_code: String, - next_hop_asset_scale: u8, - next_hop_amount: u64, - destination_ilp_address: Address, - fulfillment: String, - timestamp: String, -} - -/// Create an Interledger service wrapper that publishes records -/// of fulfilled packets to Google Cloud PubSub. -/// -/// This is an experimental feature that may be removed in the future. 
-pub fn create_google_pubsub_wrapper< - A: Account + 'static, - O: OutgoingService + Clone + Send + 'static, ->( - config: Option, -) -> impl Fn(OutgoingRequest, O) -> BoxedIlpFuture + Clone { - // If Google credentials were passed in, create an HTTP client and - // OAuth2 client that will automatically fetch and cache access tokens - let utilities = if let Some(config) = config { - let key = service_account_key_from_file(config.service_account_credentials.as_str()) - .expect("Unable to load Google Cloud credentials from file"); - let access = ServiceAccountAccess::new(key); - // This needs to be wrapped in a Mutex because the .token() - // method takes a mutable reference to self and we want to - // reuse the same fetcher so that it caches the tokens - let token_fetcher = Arc::new(Mutex::new(access.build())); - - // TODO make sure the client uses HTTP/2 - let client = Client::new(); - let api_endpoint = Arc::new(format!( - "https://pubsub.googleapis.com/v1/projects/{}/topics/{}:publish", - config.project_id, config.topic - )); - info!("Fulfilled packets will be submitted to Google Cloud Pubsub (project ID: {}, topic: {})", config.project_id, config.topic); - Some((client, api_endpoint, token_fetcher)) - } else { - None - }; - - move |request: OutgoingRequest, mut next: O| -> BoxedIlpFuture { - match &utilities { - // Just pass the request on if no Google Pubsub details were configured - None => Box::new(next.send_request(request)), - Some((client, api_endpoint, token_fetcher)) => { - let prev_hop_account = request.from.username().clone(); - let prev_hop_asset_code = request.from.asset_code().to_string(); - let prev_hop_asset_scale = request.from.asset_scale(); - let prev_hop_amount = request.original_amount; - let next_hop_account = request.to.username().clone(); - let next_hop_asset_code = request.to.asset_code().to_string(); - let next_hop_asset_scale = request.to.asset_scale(); - let next_hop_amount = request.prepare.amount(); - let destination_ilp_address = request.prepare.destination(); - let client = client.clone(); - let api_endpoint = api_endpoint.clone(); - let token_fetcher = token_fetcher.clone(); - - Box::new(next.send_request(request).map(move |fulfill| { - // Only fulfilled packets are published for now - let fulfillment = base64::encode(fulfill.fulfillment()); - - let get_token_future = token_fetcher.lock() - .token(TOKEN_SCOPES) - .map_err(|err| { - error!("Error fetching OAuth token for Google PubSub: {:?}", err) - }); - // Spawn a task to submit the packet to PubSub so we - // don't block returning the fulfillment - // Note this means that if there is a problem submitting the - // packet record to PubSub, it will only log an error - spawn( - get_token_future - .and_then(move |token| { - let record = PacketRecord { - prev_hop_account, - prev_hop_asset_code, - prev_hop_asset_scale, - prev_hop_amount, - next_hop_account, - next_hop_asset_code, - next_hop_asset_scale, - next_hop_amount, - destination_ilp_address, - fulfillment, - timestamp: Utc::now().to_rfc3339(), - }; - let data = base64::encode(&serde_json::to_string(&record).unwrap()); - - client - .post(api_endpoint.as_str()) - .bearer_auth(token.access_token) - .json(&PubsubRequest { - messages: vec![PubsubMessage { - // TODO should there be an ID? 
- message_id: None, - data: Some(data), - attributes: None, - publish_time: None, - }], - }) - .send() - .map_err(|err| { - error!( - "Error sending packet details to Google PubSub: {:?}", - err - ) - }) - .and_then(|mut res| { - if res.status().is_success() { - Either::A(ok(())) - } else { - let status = res.status(); - Either::B(res.text() - .map_err(|err| error!("Error getting response body: {:?}", err)) - .and_then(move |body| { - error!( - %status, - "Error sending packet details to Google PubSub: {}", - body - ); - Ok(()) - })) - } - }) - }), - ); - fulfill - })) - } - } - } -} diff --git a/crates/ilp-node/src/instrumentation/google_pubsub.rs b/crates/ilp-node/src/instrumentation/google_pubsub.rs new file mode 100644 index 000000000..402ab7169 --- /dev/null +++ b/crates/ilp-node/src/instrumentation/google_pubsub.rs @@ -0,0 +1,187 @@ +#[cfg(feature = "google_pubsub")] +use base64; +use chrono::Utc; +use futures::{compat::Future01CompatExt, Future, TryFutureExt}; +use interledger::{ + packet::Address, + service::{Account, IlpResult, OutgoingRequest, OutgoingService, Username}, +}; +use parking_lot::Mutex; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, sync::Arc}; +use tokio::spawn; +use tracing::{error, info}; +use yup_oauth2::{service_account_key_from_file, GetToken, ServiceAccountAccess}; + +static TOKEN_SCOPES: Option<&str> = Some("https://www.googleapis.com/auth/pubsub"); + +/// Configuration for the Google PubSub packet publisher +#[derive(Deserialize, Clone, Debug)] +pub struct PubsubConfig { + /// Path to the Service Account Key JSON file. + /// You can obtain this file by logging into [console.cloud.google.com](https://console.cloud.google.com/) + service_account_credentials: String, + project_id: String, + topic: String, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct PubsubMessage { + message_id: Option, + data: Option, + attributes: Option>, + publish_time: Option, +} + +#[derive(Serialize)] +struct PubsubRequest { + messages: Vec, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct PacketRecord { + prev_hop_account: Username, + prev_hop_asset_code: String, + prev_hop_asset_scale: u8, + prev_hop_amount: u64, + next_hop_account: Username, + next_hop_asset_code: String, + next_hop_asset_scale: u8, + next_hop_amount: u64, + destination_ilp_address: Address, + fulfillment: String, + timestamp: String, +} + +use std::pin::Pin; +type BoxedIlpFuture = Box + Send + 'static>; + +/// Create an Interledger service wrapper that publishes records +/// of fulfilled packets to Google Cloud PubSub. +/// +/// This is an experimental feature that may be removed in the future. 
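The rewritten google_pubsub.rs below still has to await the yup-oauth2 token future, which is a futures 0.1 future, so it bridges it through `futures::compat::Future01CompatExt` (the `compat` feature enabled in Cargo.toml earlier in this series). A minimal sketch of that bridge, assuming the 0.1 crate is renamed to `futures01` in Cargo.toml purely for this example:

use futures::compat::Future01CompatExt;

// `futures01` is assumed to be `futures = "0.1"` under a rename; it is not a
// dependency added by this patch.
async fn bridged() -> Result<u32, ()> {
    // a futures 0.1 future...
    let legacy = futures01::future::ok::<u32, ()>(42);
    // ...converted into a std::future::Future so it can be awaited in an async fn
    legacy.compat().await
}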
+pub fn create_google_pubsub_wrapper( + config: Option, +) -> impl Fn(OutgoingRequest, Box + Send>) -> Pin + Clone +{ + // If Google credentials were passed in, create an HTTP client and + // OAuth2 client that will automatically fetch and cache access tokens + let utilities = if let Some(config) = config { + let key = service_account_key_from_file(config.service_account_credentials.as_str()) + .expect("Unable to load Google Cloud credentials from file"); + let access = ServiceAccountAccess::new(key); + // This needs to be wrapped in a Mutex because the .token() + // method takes a mutable reference to self and we want to + // reuse the same fetcher so that it caches the tokens + let token_fetcher = Arc::new(Mutex::new(access.build())); + + // TODO make sure the client uses HTTP/2 + let client = Client::new(); + let api_endpoint = Arc::new(format!( + "https://pubsub.googleapis.com/v1/projects/{}/topics/{}:publish", + config.project_id, config.topic + )); + info!("Fulfilled packets will be submitted to Google Cloud Pubsub (project ID: {}, topic: {})", config.project_id, config.topic); + Some((client, api_endpoint, token_fetcher)) + } else { + None + }; + + move |request: OutgoingRequest, + mut next: Box + Send>| + -> Pin { + let (client, api_endpoint, token_fetcher) = if let Some(utilities) = utilities.clone() { + utilities + } else { + return Box::pin(async move { next.send_request(request).await }); + }; + + // Just pass the request on if no Google Pubsub details were configured + let prev_hop_account = request.from.username().clone(); + let prev_hop_asset_code = request.from.asset_code().to_string(); + let prev_hop_asset_scale = request.from.asset_scale(); + let prev_hop_amount = request.original_amount; + let next_hop_account = request.to.username().clone(); + let next_hop_asset_code = request.to.asset_code().to_string(); + let next_hop_asset_scale = request.to.asset_scale(); + let next_hop_amount = request.prepare.amount(); + let destination_ilp_address = request.prepare.destination(); + + Box::pin(async move { + let result = next.send_request(request).await; + + // Only fulfilled packets are published for now + if let Ok(fulfill) = result.clone() { + let fulfillment = base64::encode(fulfill.fulfillment()); + let get_token_future = + token_fetcher + .lock() + .token(TOKEN_SCOPES) + .compat() + .map_err(|err| { + error!("Error fetching OAuth token for Google PubSub: {:?}", err) + }); + + // Spawn a task to submit the packet to PubSub so we + // don't block returning the fulfillment + // Note this means that if there is a problem submitting the + // packet record to PubSub, it will only log an error + spawn(async move { + let token = get_token_future.await?; + let record = PacketRecord { + prev_hop_account, + prev_hop_asset_code, + prev_hop_asset_scale, + prev_hop_amount, + next_hop_account, + next_hop_asset_code, + next_hop_asset_scale, + next_hop_amount, + destination_ilp_address, + fulfillment, + timestamp: Utc::now().to_rfc3339(), + }; + let data = base64::encode(&serde_json::to_string(&record).unwrap()); + let res = client + .post(api_endpoint.as_str()) + .bearer_auth(token.access_token) + .json(&PubsubRequest { + messages: vec![PubsubMessage { + // TODO should there be an ID? 
+ message_id: None, + data: Some(data), + attributes: None, + publish_time: None, + }], + }) + .send() + .map_err(|err| { + error!("Error sending packet details to Google PubSub: {:?}", err) + }) + .await?; + + // Log the error + if !res.status().is_success() { + let status = res.status(); + let body = res + .text() + .map_err(|err| error!("Error getting response body: {:?}", err)) + .await?; + error!( + %status, + "Error sending packet details to Google PubSub: {}", + body + ); + } + + Ok::<(), ()>(()) + }); + } + + result + }) + } +} diff --git a/crates/ilp-node/src/instrumentation/metrics.rs b/crates/ilp-node/src/instrumentation/metrics.rs new file mode 100644 index 000000000..8f3553a13 --- /dev/null +++ b/crates/ilp-node/src/instrumentation/metrics.rs @@ -0,0 +1,82 @@ +use interledger::{ + ccp::CcpRoutingAccount, + service::{ + Account, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, + }, +}; +use metrics::{self, labels, recorder, Key}; +use std::time::Instant; + +pub async fn incoming_metrics( + request: IncomingRequest, + mut next: Box + Send>, +) -> IlpResult { + let labels = labels!( + "from_asset_code" => request.from.asset_code().to_string(), + "from_routing_relation" => request.from.routing_relation().to_string(), + ); + recorder().increment_counter( + Key::from_name_and_labels("requests.incoming.prepare", labels.clone()), + 1, + ); + let start_time = Instant::now(); + + let result = next.handle_request(request).await; + if result.is_ok() { + recorder().increment_counter( + Key::from_name_and_labels("requests.incoming.fulfill", labels.clone()), + 1, + ); + } else { + recorder().increment_counter( + Key::from_name_and_labels("requests.incoming.reject", labels.clone()), + 1, + ); + } + + recorder().record_histogram( + Key::from_name_and_labels("requests.incoming.duration", labels), + (Instant::now() - start_time).as_nanos() as u64, + ); + result +} + +pub async fn outgoing_metrics( + request: OutgoingRequest, + mut next: Box + Send>, +) -> IlpResult { + let labels = labels!( + "from_asset_code" => request.from.asset_code().to_string(), + "to_asset_code" => request.to.asset_code().to_string(), + "from_routing_relation" => request.from.routing_relation().to_string(), + "to_routing_relation" => request.to.routing_relation().to_string(), + ); + + // TODO replace these calls with the counter! 
macro if there's a way to easily pass in the already-created labels + // right now if you pass the labels into one of the other macros, it gets a recursion limit error while expanding the macro + recorder().increment_counter( + Key::from_name_and_labels("requests.outgoing.prepare", labels.clone()), + 1, + ); + let start_time = Instant::now(); + + let result = next.send_request(request).await; + if result.is_ok() { + recorder().increment_counter( + Key::from_name_and_labels("requests.outgoing.fulfill", labels.clone()), + 1, + ); + } else { + recorder().increment_counter( + Key::from_name_and_labels("requests.outgoing.reject", labels.clone()), + 1, + ); + } + + recorder().record_histogram( + Key::from_name_and_labels("requests.outgoing.duration", labels.clone()), + (Instant::now() - start_time).as_nanos() as u64, + ); + + result +} diff --git a/crates/ilp-node/src/instrumentation/mod.rs b/crates/ilp-node/src/instrumentation/mod.rs new file mode 100644 index 000000000..66113214f --- /dev/null +++ b/crates/ilp-node/src/instrumentation/mod.rs @@ -0,0 +1,10 @@ +#[cfg(feature = "monitoring")] +pub mod metrics; +#[cfg(feature = "monitoring")] +pub mod trace; + +#[cfg(feature = "monitoring")] +pub mod prometheus; + +#[cfg(feature = "google-pubsub")] +pub mod google_pubsub; diff --git a/crates/ilp-node/src/instrumentation/prometheus.rs b/crates/ilp-node/src/instrumentation/prometheus.rs new file mode 100644 index 000000000..bbd0d5184 --- /dev/null +++ b/crates/ilp-node/src/instrumentation/prometheus.rs @@ -0,0 +1,90 @@ +use crate::InterledgerNode; +use metrics_core::{Builder, Drain, Observe}; +use metrics_runtime; +use serde::Deserialize; +use std::{net::SocketAddr, sync::Arc, time::Duration}; +use tracing::{error, info}; +use warp::{ + http::{Response, StatusCode}, + Filter, +}; + +/// Configuration for [Prometheus](https://prometheus.io) metrics collection. +#[derive(Deserialize, Clone)] +pub struct PrometheusConfig { + /// IP address and port to host the Prometheus endpoint on. + pub bind_address: SocketAddr, + /// Amount of time, in milliseconds, that the node will collect data points for the + /// Prometheus histograms. Defaults to 300000ms (5 minutes). + #[serde(default = "PrometheusConfig::default_histogram_window")] + pub histogram_window: u64, + /// Granularity, in milliseconds, that the node will use to roll off old data. + /// For example, a value of 1000ms (1 second) would mean that the node forgets the oldest + /// 1 second of histogram data points every second. Defaults to 10000ms (10 seconds). + #[serde(default = "PrometheusConfig::default_histogram_granularity")] + pub histogram_granularity: u64, +} + +impl PrometheusConfig { + fn default_histogram_window() -> u64 { + 300_000 + } + + fn default_histogram_granularity() -> u64 { + 10_000 + } +} + +/// Starts a Prometheus metrics server that will listen on the configured address. +/// +/// # Errors +/// This will fail if another Prometheus server is already running in this +/// process or on the configured port. 
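PrometheusConfig above relies on serde defaults to fill in the histogram window and granularity when the operator omits them. A small, self-contained sketch of that deserialization behavior, using a stand-in struct whose field names mirror the patch (the struct and helper names here are illustrative, not the node's API):

use serde::Deserialize;
use serde_json::json;
use std::net::SocketAddr;

#[derive(Deserialize, Debug)]
struct PrometheusConfigSketch {
    bind_address: SocketAddr,
    #[serde(default = "default_window")]
    histogram_window: u64,
    #[serde(default = "default_granularity")]
    histogram_granularity: u64,
}

fn default_window() -> u64 {
    300_000
}

fn default_granularity() -> u64 {
    10_000
}

fn main() {
    // Only bind_address is supplied; the two histogram fields fall back to the defaults.
    let cfg: PrometheusConfigSketch =
        serde_json::from_value(json!({ "bind_address": "127.0.0.1:9954" })).unwrap();
    println!("{:?}", cfg);
}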
+#[allow(clippy::cognitive_complexity)] +pub async fn serve_prometheus(node: InterledgerNode) -> Result<(), ()> { + let prometheus = if let Some(ref prometheus) = node.prometheus { + prometheus + } else { + error!(target: "interledger-node", "No prometheus configuration provided"); + return Err(()); + }; + + // Set up the metrics collector + let receiver = metrics_runtime::Builder::default() + .histogram( + Duration::from_millis(prometheus.histogram_window), + Duration::from_millis(prometheus.histogram_granularity), + ) + .build() + .expect("Failed to create metrics Receiver"); + + let controller = receiver.controller(); + // Try installing the global recorder + match metrics::set_boxed_recorder(Box::new(receiver)) { + Ok(_) => { + let observer = Arc::new(metrics_runtime::observers::PrometheusBuilder::default()); + + let filter = warp::get().and(warp::path::end()).map(move || { + let mut observer = observer.build(); + controller.observe(&mut observer); + let prometheus_response = observer.drain(); + Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "text/plain; version=0.0.4") + .body(prometheus_response) + }); + + info!(target: "interledger-node", + "Prometheus metrics server listening on: {}", + prometheus.bind_address + ); + + tokio::spawn(warp::serve(filter).bind(prometheus.bind_address)); + Ok(()) + } + Err(e) => { + error!(target: "interledger-node", "Error installing global metrics recorder (this is likely caused by trying to run two nodes with Prometheus metrics in the same process): {:?}", e); + Err(()) + } + } +} diff --git a/crates/ilp-node/src/trace.rs b/crates/ilp-node/src/instrumentation/trace.rs similarity index 82% rename from crates/ilp-node/src/trace.rs rename to crates/ilp-node/src/instrumentation/trace.rs index 911517926..7f886c52c 100644 --- a/crates/ilp-node/src/trace.rs +++ b/crates/ilp-node/src/instrumentation/trace.rs @@ -1,8 +1,9 @@ -use futures::Future; use interledger::{ ccp::{CcpRoutingAccount, RoutingRelation}, packet::{ErrorCode, Fulfill, Reject}, - service::{Account, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService}, + service::{ + Account, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, + }, }; use std::str; use tracing::{debug_span, error_span, info, info_span}; @@ -12,10 +13,10 @@ use uuid::Uuid; /// Add tracing context for the incoming request. /// This adds minimal information for the ERROR log /// level and more information for the DEBUG level. -pub fn trace_incoming( +pub async fn trace_incoming( request: IncomingRequest, - mut next: impl IncomingService, -) -> impl Future { + mut next: Box + Send>, +) -> IlpResult { let request_span = error_span!(target: "interledger-node", "incoming", request.id = %Uuid::new_v4(), @@ -37,19 +38,17 @@ pub fn trace_incoming( ); let _details_scope = details_span.enter(); - next.handle_request(request) - .then(trace_response) - .in_current_span() + next.handle_request(request).in_current_span().await } /// Add tracing context when the incoming request is /// being forwarded and turned into an outgoing request. /// This adds minimal information for the ERROR log /// level and more information for the DEBUG level. 
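The serve_prometheus function above exposes the metrics snapshot through a warp 0.2 filter on the configured bind address. A minimal sketch of that serving pattern, with a placeholder string in place of the real metrics drain and a hard-coded address chosen only for this example:

use warp::Filter;

#[tokio::main]
async fn main() {
    // GET on "/" returns a plain-text body, as the Prometheus endpoint does;
    // the real handler drains the metrics observer instead of a constant string.
    let filter = warp::get()
        .and(warp::path::end())
        .map(|| "# metrics snapshot\n");
    warp::serve(filter).bind(([127, 0, 0, 1], 9999)).await;
}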
-pub fn trace_forwarding( +pub async fn trace_forwarding( request: OutgoingRequest, - mut next: impl OutgoingService, -) -> impl Future { + mut next: Box + Send>, +) -> IlpResult { // Here we only include the outgoing details because this will be // inside the "incoming" span that includes the other details let request_span = error_span!(target: "interledger-node", @@ -66,16 +65,16 @@ pub fn trace_forwarding( ); let _details_scope = details_span.enter(); - next.send_request(request).in_current_span() + next.send_request(request).in_current_span().await } /// Add tracing context for the outgoing request (created by this node). /// This adds minimal information for the ERROR log /// level and more information for the DEBUG level. -pub fn trace_outgoing( +pub async fn trace_outgoing( request: OutgoingRequest, - mut next: impl OutgoingService, -) -> impl Future { + mut next: Box + Send>, +) -> IlpResult { let request_span = error_span!(target: "interledger-node", "outgoing", request.id = %Uuid::new_v4(), @@ -100,16 +99,14 @@ pub fn trace_outgoing( // because there's a good chance they'll be offline let ignore_rejects = request.prepare.destination().scheme() == "peer" && request.to.routing_relation() == RoutingRelation::Child; - next.send_request(request) - .then(move |result| { - if let Err(ref err) = result { - if err.code() == ErrorCode::F02_UNREACHABLE && ignore_rejects { - return result; - } - } - trace_response(result) - }) - .in_current_span() + + let result = next.send_request(request).in_current_span().await; + if let Err(ref err) = result { + if err.code() == ErrorCode::F02_UNREACHABLE && ignore_rejects { + return result; + } + } + trace_response(result) } /// Log whether the response was a Fulfill or Reject diff --git a/crates/ilp-node/src/lib.rs b/crates/ilp-node/src/lib.rs index 1086e4991..7f20db21b 100644 --- a/crates/ilp-node/src/lib.rs +++ b/crates/ilp-node/src/lib.rs @@ -1,15 +1,8 @@ -#![type_length_limit = "1152909"] - -mod metrics; +#![type_length_limit = "5000000"] +mod instrumentation; mod node; -mod trace; -#[cfg(feature = "google-pubsub")] -mod google_pubsub; #[cfg(feature = "redis")] mod redis_store; pub use node::*; -#[allow(deprecated)] -#[cfg(feature = "redis")] -pub use redis_store::insert_account_with_redis_store; diff --git a/crates/ilp-node/src/main.rs b/crates/ilp-node/src/main.rs index eb4921fa4..a4b155e80 100644 --- a/crates/ilp-node/src/main.rs +++ b/crates/ilp-node/src/main.rs @@ -1,11 +1,7 @@ -#![type_length_limit = "1152909"] +#![type_length_limit = "5000000"] +mod instrumentation; +pub mod node; -mod metrics; -mod node; -mod trace; - -#[cfg(feature = "google-pubsub")] -mod google_pubsub; #[cfg(feature = "redis")] mod redis_store; @@ -19,12 +15,16 @@ use std::{ io::Read, vec::Vec, }; + +#[cfg(feature = "monitoring")] use tracing_subscriber::{ filter::EnvFilter, fmt::{time::ChronoUtc, Subscriber}, }; -pub fn main() { +#[tokio::main] +async fn main() { + #[cfg(feature = "monitoring")] Subscriber::builder() .with_timer(ChronoUtc::rfc3339()) .with_env_filter(EnvFilter::from_default_env()) @@ -143,7 +143,13 @@ pub fn main() { } let matches = app.clone().get_matches(); merge_args(&mut config, &matches); - config.try_into::().unwrap().run(); + + let node = config.try_into::().unwrap(); + node.serve().await.unwrap(); + + // Add a future which is always pending. This will ensure main does not exist + // TODO: Is there a better way of doing this? 
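The main.rs hunk below switches to an async main under #[tokio::main], starts the node, and then parks on a never-resolving future so the process keeps running. A self-contained sketch of that shape, where the spawned task is a stand-in rather than the node's real serve() call:

#[tokio::main]
async fn main() {
    // stand-in for spawning the node's long-running work
    tokio::spawn(async {
        // node.serve().await would go here in the real binary
    });
    // park forever so main never returns and spawned tasks keep running
    futures::future::pending::<()>().await;
}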
+ futures::future::pending().await } // returns (subcommand paths, config path) diff --git a/crates/ilp-node/src/metrics.rs b/crates/ilp-node/src/metrics.rs deleted file mode 100644 index 35ba411ab..000000000 --- a/crates/ilp-node/src/metrics.rs +++ /dev/null @@ -1,82 +0,0 @@ -use futures::Future; -use interledger::{ - ccp::CcpRoutingAccount, - packet::{Fulfill, Reject}, - service::{Account, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService}, -}; -use metrics::{self, labels, recorder, Key}; -use std::time::Instant; - -pub fn incoming_metrics( - request: IncomingRequest, - mut next: impl IncomingService, -) -> impl Future { - let labels = labels!( - "from_asset_code" => request.from.asset_code().to_string(), - "from_routing_relation" => request.from.routing_relation().to_string(), - ); - recorder().increment_counter( - Key::from_name_and_labels("requests.incoming.prepare", labels.clone()), - 1, - ); - let start_time = Instant::now(); - - next.handle_request(request).then(move |result| { - if result.is_ok() { - recorder().increment_counter( - Key::from_name_and_labels("requests.incoming.fulfill", labels.clone()), - 1, - ); - } else { - recorder().increment_counter( - Key::from_name_and_labels("requests.incoming.reject", labels.clone()), - 1, - ); - } - recorder().record_histogram( - Key::from_name_and_labels("requests.incoming.duration", labels), - (Instant::now() - start_time).as_nanos() as u64, - ); - result - }) -} - -pub fn outgoing_metrics( - request: OutgoingRequest, - mut next: impl OutgoingService, -) -> impl Future { - let labels = labels!( - "from_asset_code" => request.from.asset_code().to_string(), - "to_asset_code" => request.to.asset_code().to_string(), - "from_routing_relation" => request.from.routing_relation().to_string(), - "to_routing_relation" => request.to.routing_relation().to_string(), - ); - - // TODO replace these calls with the counter! 
macro if there's a way to easily pass in the already-created labels - // right now if you pass the labels into one of the other macros, it gets a recursion limit error while expanding the macro - recorder().increment_counter( - Key::from_name_and_labels("requests.outgoing.prepare", labels.clone()), - 1, - ); - let start_time = Instant::now(); - - next.send_request(request).then(move |result| { - if result.is_ok() { - recorder().increment_counter( - Key::from_name_and_labels("requests.outgoing.fulfill", labels.clone()), - 1, - ); - } else { - recorder().increment_counter( - Key::from_name_and_labels("requests.outgoing.reject", labels.clone()), - 1, - ); - } - - recorder().record_histogram( - Key::from_name_and_labels("requests.outgoing.duration", labels.clone()), - (Instant::now() - start_time).as_nanos() as u64, - ); - result - }) -} diff --git a/crates/ilp-node/src/node.rs b/crates/ilp-node/src/node.rs index c8275c0aa..4c9f2404b 100644 --- a/crates/ilp-node/src/node.rs +++ b/crates/ilp-node/src/node.rs @@ -1,10 +1,29 @@ -use crate::metrics::{incoming_metrics, outgoing_metrics}; -use crate::trace::{trace_forwarding, trace_incoming, trace_outgoing}; -use bytes::Bytes; -use futures::{ - future::{err, Either}, - Future, +#[cfg(feature = "google-pubsub")] +use crate::instrumentation::google_pubsub::{create_google_pubsub_wrapper, PubsubConfig}; + +#[cfg(feature = "monitoring")] +use tracing_futures::Instrument; + +#[cfg(feature = "monitoring")] +use tracing::debug_span; + +#[cfg(feature = "monitoring")] +use crate::instrumentation::{ + metrics::{incoming_metrics, outgoing_metrics}, + prometheus::{serve_prometheus, PrometheusConfig}, + trace::{trace_forwarding, trace_incoming, trace_outgoing}, }; + +#[cfg(feature = "monitoring")] +use interledger::service::IncomingService; +#[cfg(any(feature = "monitoring", feature = "google-pubsub"))] +use interledger::service::OutgoingService; + +#[cfg(feature = "monitoring")] +use futures::FutureExt; + +use bytes::Bytes; +use futures::TryFutureExt; use hex::FromHex; use interledger::{ api::{NodeApi, NodeStore}, @@ -16,8 +35,7 @@ use interledger::{ packet::{ErrorCode, RejectBuilder}, router::{Router, RouterStore}, service::{ - outgoing_service_fn, Account as AccountTrait, AccountStore, IncomingService, - OutgoingRequest, OutgoingService, Username, + outgoing_service_fn, Account as AccountTrait, AccountStore, OutgoingRequest, Username, }, service_util::{ BalanceStore, EchoService, ExchangeRateFetcher, ExchangeRateService, ExchangeRateStore, @@ -35,25 +53,15 @@ use interledger::{ stream::{StreamNotificationsStore, StreamReceiverService}, }; use lazy_static::lazy_static; -use metrics_core::{Builder, Drain, Observe}; -use metrics_runtime; use num_bigint::BigUint; use serde::{de::Error as DeserializeError, Deserialize, Deserializer}; -use std::sync::Arc; use std::{convert::TryFrom, net::SocketAddr, str, str::FromStr, time::Duration}; use tokio::spawn; -use tracing::{debug, debug_span, error, info}; -use tracing_futures::Instrument; +use tracing::{debug, error, info}; use url::Url; use uuid::Uuid; -use warp::{ - self, - http::{Response, StatusCode}, - Filter, -}; +use warp::{self, Filter}; -#[cfg(feature = "google-pubsub")] -use crate::google_pubsub::{create_google_pubsub_wrapper, PubsubConfig}; #[cfg(feature = "redis")] use crate::redis_store::*; #[cfg(feature = "balance-tracking")] @@ -121,32 +129,6 @@ where } } -/// Configuration for [Prometheus](https://prometheus.io) metrics collection. 
-#[derive(Deserialize, Clone)] -pub struct PrometheusConfig { - /// IP address and port to host the Prometheus endpoint on. - pub bind_address: SocketAddr, - /// Amount of time, in milliseconds, that the node will collect data points for the - /// Prometheus histograms. Defaults to 300000ms (5 minutes). - #[serde(default = "PrometheusConfig::default_histogram_window")] - pub histogram_window: u64, - /// Granularity, in milliseconds, that the node will use to roll off old data. - /// For example, a value of 1000ms (1 second) would mean that the node forgets the oldest - /// 1 second of histogram data points every second. Defaults to 10000ms (10 seconds). - #[serde(default = "PrometheusConfig::default_histogram_granularity")] - pub histogram_granularity: u64, -} - -impl PrometheusConfig { - fn default_histogram_window() -> u64 { - 300_000 - } - - fn default_histogram_granularity() -> u64 { - 10_000 - } -} - /// Configuration for calculating exchange rates between various pairs. #[derive(Deserialize, Clone, Default)] pub struct ExchangeRateConfig { @@ -226,6 +208,8 @@ pub struct InterledgerNode { pub exchange_rate: ExchangeRateConfig, /// Configuration for [Prometheus](https://prometheus.io) metrics collection. /// If this configuration is not provided, the node will not collect metrics. + /// Needs the feature flag "monitoring" to be enabled + #[cfg(feature = "monitoring")] #[serde(default)] pub prometheus: Option, #[cfg(feature = "google-pubsub")] @@ -239,19 +223,26 @@ impl InterledgerNode { /// also run the Prometheus metrics server on the given address. // TODO when a BTP connection is made, insert a outgoing HTTP entry into the Store to tell other // connector instances to forward packets for that account to us - pub fn serve(self) -> impl Future { - if self.prometheus.is_some() { - Either::A( - self.serve_prometheus() - .join(self.serve_node()) - .and_then(|_| Ok(())), - ) - } else { - Either::B(self.serve_node()) - } + pub async fn serve(self) -> Result<(), ()> { + #[cfg(feature = "monitoring")] + let f = + futures::future::join(serve_prometheus(self.clone()), self.serve_node()).then(|r| { + async move { + if r.0.is_ok() || r.1.is_ok() { + Ok(()) + } else { + Err(()) + } + } + }); + + #[cfg(not(feature = "monitoring"))] + let f = self.serve_node(); + + f.await } - fn serve_node(self) -> Box + Send + 'static> { + async fn serve_node(self) -> Result<(), ()> { let ilp_address = if let Some(address) = &self.ilp_address { address.clone() } else { @@ -266,26 +257,22 @@ impl InterledgerNode { "The string '{}' could not be parsed as a URL: {}", &self.database_url, e ); - return Box::new(err(())); + return Err(()); } }; match database_url.scheme() { #[cfg(feature = "redis")] - "redis" | "redis+unix" => Box::new(serve_redis_node(self, ilp_address)), + "redis" | "redis+unix" => serve_redis_node(self, ilp_address).await, other => { error!("unsupported data source scheme: {}", other); - Box::new(err(())) + Err(()) } } } #[allow(clippy::cognitive_complexity)] - pub(crate) fn chain_services( - self, - store: S, - ilp_address: Address, - ) -> impl Future + pub(crate) async fn chain_services(self, store: S, ilp_address: Address) -> Result<(), ()> where S: NodeStore + BtpStore @@ -327,257 +314,211 @@ impl InterledgerNode { #[cfg(feature = "google-pubsub")] let google_pubsub = self.google_pubsub.clone(); - store.clone().get_btp_outgoing_accounts() - .map_err(|_| error!(target: "interledger-node", "Error getting accounts")) - .and_then(move |btp_accounts| { - let outgoing_service = - 
outgoing_service_fn(move |request: OutgoingRequest| { - // Don't log anything for failed route updates sent to child accounts - // because there's a good chance they'll be offline - if request.prepare.destination().scheme() != "peer" - || request.to.routing_relation() != RoutingRelation::Child { - error!(target: "interledger-node", "No route found for outgoing request"); - } - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &format!( - // TODO we might not want to expose the internal account ID in the error - "No outgoing route for account: {} (ILP address of the Prepare packet: {})", - request.to.id(), - request.prepare.destination(), - ) - .as_bytes(), - triggered_by: Some(&ilp_address_clone), - data: &[], - } - .build()) - }); - - // Connect to all of the accounts that have outgoing ilp_over_btp_urls configured - // but don't fail if we are unable to connect - // TODO try reconnecting to those accounts later - connect_client(ilp_address_clone2.clone(), btp_accounts, false, outgoing_service) - .and_then( - move |btp_client_service| { - let btp_server_service = BtpOutgoingService::new(ilp_address_clone2, btp_client_service.clone()); - let btp_server_service_clone = btp_server_service.clone(); - let btp = btp_client_service.clone(); - - // The BTP service is both an Incoming and Outgoing one so we pass it first as the Outgoing - // service to others like the router and then call handle_incoming on it to set up the incoming handler - let outgoing_service = btp_server_service.clone(); - let outgoing_service = HttpClientService::new( - store.clone(), - outgoing_service, - ); - - let outgoing_service = outgoing_service.wrap(outgoing_metrics); - - // Note: the expiry shortener must come after the Validator so that the expiry duration - // is shortened before we check whether there is enough time left - let outgoing_service = ValidatorService::outgoing( - store.clone(), - outgoing_service - ); - let outgoing_service = - ExpiryShortenerService::new(outgoing_service); - let outgoing_service = StreamReceiverService::new( - secret_seed.clone(), - store.clone(), - outgoing_service, - ); - #[cfg(feature = "balance-tracking")] - let outgoing_service = BalanceService::new( - store.clone(), - outgoing_service, - ); - let outgoing_service = ExchangeRateService::new( - exchange_rate_spread, - store.clone(), - outgoing_service, - ); - - #[cfg(feature = "google-pubsub")] - let outgoing_service = outgoing_service.wrap(create_google_pubsub_wrapper(google_pubsub)); - - // Set up the Router and Routing Manager - let incoming_service = Router::new( - store.clone(), - // Add tracing to add the outgoing request details to the incoming span - outgoing_service.clone().wrap(trace_forwarding), - ); - - // Add tracing to track the outgoing request details - let outgoing_service = outgoing_service.wrap(trace_outgoing).in_current_span(); - - let mut ccp_builder = CcpRouteManagerBuilder::new( - ilp_address.clone(), - store.clone(), - outgoing_service.clone(), - incoming_service, - ); - ccp_builder.ilp_address(ilp_address.clone()); - if let Some(ms) = route_broadcast_interval { - ccp_builder.broadcast_interval(ms); - } - let incoming_service = ccp_builder.to_service(); - let incoming_service = EchoService::new(store.clone(), incoming_service); - let incoming_service = SettlementMessageService::new(incoming_service); - let incoming_service = IldcpService::new(incoming_service); - let incoming_service = - MaxPacketAmountService::new( - store.clone(), - incoming_service - ); - let incoming_service = - 
ValidatorService::incoming(store.clone(), incoming_service); - let incoming_service = RateLimitService::new( - store.clone(), - incoming_service, - ); - - // Add tracing to track the incoming request details - let incoming_service = incoming_service.wrap(trace_incoming).in_current_span(); - - let incoming_service = incoming_service.wrap(incoming_metrics); - - // Handle incoming packets sent via BTP - btp_server_service.handle_incoming(incoming_service.clone().wrap(|request, mut next| { - let btp = debug_span!(target: "interledger-node", "btp"); - let _btp_scope = btp.enter(); - next.handle_request(request).in_current_span() - }).in_current_span()); - btp_client_service.handle_incoming(incoming_service.clone().wrap(|request, mut next| { - let btp = debug_span!(target: "interledger-node", "btp"); - let _btp_scope = btp.enter(); - next.handle_request(request).in_current_span() - }).in_current_span()); - - // Node HTTP API - let mut api = NodeApi::new( - secret_seed, - admin_auth_token, - store.clone(), - incoming_service.clone().wrap(|request, mut next| { - let api = debug_span!(target: "interledger-node", "api"); - let _api_scope = api.enter(); - next.handle_request(request).in_current_span() - }).in_current_span(), - outgoing_service.clone(), - btp.clone(), - ); - if let Some(username) = default_spsp_account { - api.default_spsp_account(username); - } - api.node_version(env!("CARGO_PKG_VERSION").to_string()); - // add an API of ILP over HTTP and add rejection handler - let api = api.into_warp_filter() - .or(IlpOverHttpServer::new(incoming_service.clone().wrap(|request, mut next| { - let http = debug_span!(target: "interledger-node", "http"); - let _http_scope = http.enter(); - next.handle_request(request).in_current_span() - }).in_current_span(), store.clone()).as_filter()) - .or(btp_service_as_filter(btp_server_service_clone, store.clone())) - .recover(default_rejection_handler) - .with(warp::log("interledger-api")).boxed(); - - info!(target: "interledger-node", "Interledger.rs node HTTP API listening on: {}", http_bind_address); - spawn(warp::serve(api).bind(http_bind_address)); - - // Settlement API - let settlement_api = create_settlements_filter( - store.clone(), - outgoing_service.clone(), - ); - info!(target: "interledger-node", "Settlement API listening on: {}", settlement_api_bind_address); - spawn(warp::serve(settlement_api).bind(settlement_api_bind_address)); - - // Exchange Rate Polling - if let Some(provider) = exchange_rate_provider { - let exchange_rate_fetcher = ExchangeRateFetcher::new(provider, exchange_rate_poll_failure_tolerance, store.clone()); - exchange_rate_fetcher.spawn_interval(Duration::from_millis(exchange_rate_poll_interval)); - } else { - debug!(target: "interledger-node", "Not using exchange rate provider. 
Rates must be set via the HTTP API"); - } + let btp_accounts = store + .get_btp_outgoing_accounts() + .map_err(|_| error!(target: "interledger-node", "Error getting accounts")) + .await?; + + let outgoing_service = outgoing_service_fn(move |request: OutgoingRequest| { + // Don't log anything for failed route updates sent to child accounts + // because there's a good chance they'll be offline + if request.prepare.destination().scheme() != "peer" + || request.to.routing_relation() != RoutingRelation::Child + { + error!(target: "interledger-node", "No route found for outgoing request"); + } + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &format!( + // TODO we might not want to expose the internal account ID in the error + "No outgoing route for account: {} (ILP address of the Prepare packet: {})", + request.to.id(), + request.prepare.destination(), + ) + .as_bytes(), + triggered_by: Some(&ilp_address_clone), + data: &[], + } + .build()) + }); + + // Connect to all of the accounts that have outgoing ilp_over_btp_urls configured + // but don't fail if we are unable to connect + // TODO try reconnecting to those accounts later + let btp_client_service = connect_client( + ilp_address_clone2.clone(), + btp_accounts, + false, + outgoing_service, + ) + .await?; + let btp_server_service = + BtpOutgoingService::new(ilp_address_clone2, btp_client_service.clone()); + let btp_server_service_clone = btp_server_service.clone(); + let btp = btp_client_service.clone(); + + // The BTP service is both an Incoming and Outgoing one so we pass it first as the Outgoing + // service to others like the router and then call handle_incoming on it to set up the incoming handler + let outgoing_service = btp_server_service.clone(); + let outgoing_service = HttpClientService::new(store.clone(), outgoing_service); + + #[cfg(feature = "monitoring")] + let outgoing_service = outgoing_service.wrap(outgoing_metrics); + + // Note: the expiry shortener must come after the Validator so that the expiry duration + // is shortened before we check whether there is enough time left + let outgoing_service = ValidatorService::outgoing(store.clone(), outgoing_service); + let outgoing_service = ExpiryShortenerService::new(outgoing_service); + let outgoing_service = + StreamReceiverService::new(secret_seed.clone(), store.clone(), outgoing_service); + #[cfg(feature = "balance-tracking")] + let outgoing_service = BalanceService::new(store.clone(), outgoing_service); + let outgoing_service = + ExchangeRateService::new(exchange_rate_spread, store.clone(), outgoing_service); - Ok(()) - }, - ) - }) - .in_current_span() - } + #[cfg(feature = "google-pubsub")] + let outgoing_service = outgoing_service.wrap(create_google_pubsub_wrapper(google_pubsub)); + + // Add tracing to add the outgoing request details to the incoming span + #[cfg(feature = "monitoring")] + let outgoing_service_fwd = outgoing_service + .clone() + .wrap(trace_forwarding) + .in_current_span(); + #[cfg(not(feature = "monitoring"))] + let outgoing_service_fwd = outgoing_service.clone(); + + // Set up the Router and Routing Manager + let incoming_service = Router::new(store.clone(), outgoing_service_fwd); + + // Add tracing to track the outgoing request details + #[cfg(feature = "monitoring")] + let outgoing_service = outgoing_service.wrap(trace_outgoing).in_current_span(); + + let mut ccp_builder = CcpRouteManagerBuilder::new( + ilp_address.clone(), + store.clone(), + outgoing_service.clone(), + incoming_service, + ); + 
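When the `monitoring` feature is enabled, the service chain above attaches tracing spans to the incoming and outgoing calls via `.in_current_span()` and the trace_* wrappers. A stripped-down sketch of that span-attachment pattern with tracing-futures 0.2, where `handle` is a hypothetical async function standing in for a service call:

use tracing::info_span;
use tracing_futures::Instrument;

async fn handle() -> u32 {
    7
}

#[tokio::main]
async fn main() {
    // create a span and attach it to the async call for its whole lifetime,
    // roughly what the trace_incoming/trace_outgoing wrappers do per request
    let span = info_span!("outgoing", request.id = 42_u64);
    let result = handle().instrument(span).await;
    println!("{}", result);
}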
ccp_builder.ilp_address(ilp_address.clone()); + if let Some(ms) = route_broadcast_interval { + ccp_builder.broadcast_interval(ms); + } - /// Starts a Prometheus metrics server that will listen on the configured address. - /// - /// # Errors - /// This will fail if another Prometheus server is already running in this - /// process or on the configured port. - #[allow(clippy::cognitive_complexity)] - fn serve_prometheus(&self) -> impl Future { - Box::new(if let Some(ref prometheus) = self.prometheus { - // Set up the metrics collector - let receiver = metrics_runtime::Builder::default() - .histogram( - Duration::from_millis(prometheus.histogram_window), - Duration::from_millis(prometheus.histogram_granularity), - ) - .build() - .expect("Failed to create metrics Receiver"); - let controller = receiver.controller(); - // Try installing the global recorder - match metrics::set_boxed_recorder(Box::new(receiver)) { - Ok(_) => { - let observer = - Arc::new(metrics_runtime::observers::PrometheusBuilder::default()); - - let filter = warp::get2().and(warp::path::end()).map(move || { - let mut observer = observer.build(); - controller.observe(&mut observer); - let prometheus_response = observer.drain(); - Response::builder() - .status(StatusCode::OK) - .header("Content-Type", "text/plain; version=0.0.4") - .body(prometheus_response) - }); - - info!(target: "interledger-node", - "Prometheus metrics server listening on: {}", - prometheus.bind_address - ); - Either::A( - warp::serve(filter) - .bind(prometheus.bind_address) - .map_err(|_| { - error!(target: "interledger-node", "Error binding Prometheus server to the configured address") - }), - ) + let incoming_service = ccp_builder.to_service(); + let incoming_service = EchoService::new(store.clone(), incoming_service); + let incoming_service = SettlementMessageService::new(incoming_service); + let incoming_service = IldcpService::new(incoming_service); + let incoming_service = MaxPacketAmountService::new(store.clone(), incoming_service); + let incoming_service = ValidatorService::incoming(store.clone(), incoming_service); + let incoming_service = RateLimitService::new(store.clone(), incoming_service); + + // Add tracing to track the incoming request details + #[cfg(feature = "monitoring")] + let incoming_service = incoming_service + .wrap(trace_incoming) + .in_current_span() + .wrap(incoming_metrics); + + // Handle incoming packets sent via BTP + #[cfg(feature = "monitoring")] + let incoming_service_btp = incoming_service + .clone() + .wrap(|request, mut next| { + async move { + let btp = debug_span!(target: "interledger-node", "btp"); + let _btp_scope = btp.enter(); + next.handle_request(request).in_current_span().await } - Err(e) => { - error!(target: "interledger-node", "Error installing global metrics recorder (this is likely caused by trying to run two nodes with Prometheus metrics in the same process): {:?}", e); - Either::B(err(())) + }) + .in_current_span(); + #[cfg(not(feature = "monitoring"))] + let incoming_service_btp = incoming_service.clone(); + + btp_server_service + .handle_incoming(incoming_service_btp.clone()) + .await; + + btp_client_service + .handle_incoming(incoming_service_btp) + .await; + + #[cfg(feature = "monitoring")] + let incoming_service_api = incoming_service + .clone() + .wrap(|request, mut next| { + async move { + let api = debug_span!(target: "interledger-node", "api"); + let _api_scope = api.enter(); + next.handle_request(request).in_current_span().await } - } + }) + .in_current_span(); + #[cfg(not(feature = 
"monitoring"))] + let incoming_service_api = incoming_service.clone(); + + // Node HTTP API + let mut api = NodeApi::new( + bytes05::Bytes::copy_from_slice(secret_seed.as_ref()), + admin_auth_token, + store.clone(), + incoming_service_api.clone(), + outgoing_service.clone(), + btp.clone(), // btp client service! + ); + if let Some(username) = default_spsp_account { + api.default_spsp_account(username); + } + api.node_version(env!("CARGO_PKG_VERSION").to_string()); + + #[cfg(feature = "monitoring")] + let incoming_service_http = incoming_service + .clone() + .wrap(|request, mut next| { + async move { + let http = debug_span!(target: "interledger-node", "http"); + let _http_scope = http.enter(); + next.handle_request(request).in_current_span().await + } + }) + .in_current_span(); + #[cfg(not(feature = "monitoring"))] + let incoming_service_http = incoming_service.clone(); + + // add an API of ILP over HTTP and add rejection handler + let api = api + .into_warp_filter() + .or(IlpOverHttpServer::new(incoming_service_http, store.clone()).as_filter()) + .or(btp_service_as_filter( + btp_server_service_clone, + store.clone(), + )) + .recover(default_rejection_handler) + .with(warp::log("interledger-api")) + .boxed(); + + info!(target: "interledger-node", "Interledger.rs node HTTP API listening on: {}", http_bind_address); + spawn(warp::serve(api).bind(http_bind_address)); + + // Settlement API + let settlement_api = create_settlements_filter(store.clone(), outgoing_service.clone()); + info!(target: "interledger-node", "Settlement API listening on: {}", settlement_api_bind_address); + spawn(warp::serve(settlement_api).bind(settlement_api_bind_address)); + + // Exchange Rate Polling + if let Some(provider) = exchange_rate_provider { + let exchange_rate_fetcher = ExchangeRateFetcher::new( + provider, + exchange_rate_poll_failure_tolerance, + store.clone(), + ); + exchange_rate_fetcher + .spawn_interval(Duration::from_millis(exchange_rate_poll_interval)); } else { - error!(target: "interledger-node", "No prometheus configuration provided"); - Either::B(err(())) - }) - } + debug!(target: "interledger-node", "Not using exchange rate provider. Rates must be set via the HTTP API"); + } - /// Run the node on the default Tokio runtime - pub fn run(self) { - tokio_run(self.serve()); + Ok(()) } } - -#[doc(hidden)] -pub fn tokio_run(fut: impl Future + Send + 'static) { - let mut runtime = tokio::runtime::Builder::new() - // Don't swallow panics - .panic_handler(|err| std::panic::resume_unwind(err)) - .name_prefix("interledger-rs-worker-") - .build() - .expect("failed to start new runtime"); - - runtime.spawn(fut); - runtime.shutdown_on_idle().wait().unwrap(); -} diff --git a/crates/ilp-node/src/redis_store.rs b/crates/ilp-node/src/redis_store.rs index ef9a957c0..7377ab884 100644 --- a/crates/ilp-node/src/redis_store.rs +++ b/crates/ilp-node/src/redis_store.rs @@ -1,7 +1,7 @@ #![cfg(feature = "redis")] use crate::node::InterledgerNode; -use futures::{future::result, Future}; +use futures::TryFutureExt; pub use interledger::{ api::{AccountDetails, NodeStore}, packet::Address, @@ -10,8 +10,7 @@ pub use interledger::{ }; pub use redis_crate::{ConnectionInfo, IntoConnectionInfo}; use ring::hmac; -use tracing::{debug, error}; -use uuid::Uuid; +use tracing::error; static REDIS_SECRET_GENERATION_STRING: &str = "ilp_redis_secret"; @@ -22,18 +21,16 @@ pub fn default_redis_url() -> String { // This function could theoretically be defined as an inherent method on InterledgerNode itself. 
// However, we define it in this module in order to consolidate conditionally-compiled code // into as few discrete units as possible. -pub fn serve_redis_node( - node: InterledgerNode, - ilp_address: Address, -) -> impl Future { +pub async fn serve_redis_node(node: InterledgerNode, ilp_address: Address) -> Result<(), ()> { let redis_connection_info = node.database_url.clone().into_connection_info().unwrap(); let redis_addr = redis_connection_info.addr.clone(); let redis_secret = generate_redis_secret(&node.secret_seed); - Box::new(RedisStoreBuilder::new(redis_connection_info, redis_secret) - .node_ilp_address(ilp_address.clone()) - .connect() - .map_err(move |err| error!(target: "interledger-node", "Error connecting to Redis: {:?} {:?}", redis_addr, err)) - .and_then(move |store| node.chain_services(store, ilp_address))) + let store = RedisStoreBuilder::new(redis_connection_info, redis_secret) + .node_ilp_address(ilp_address.clone()) + .connect() + .map_err(move |err| error!(target: "interledger-node", "Error connecting to Redis: {:?} {:?}", redis_addr, err)) + .await?; + node.chain_services(store, ilp_address).await } pub fn generate_redis_secret(secret_seed: &[u8; 32]) -> [u8; 32] { @@ -45,28 +42,3 @@ pub fn generate_redis_secret(secret_seed: &[u8; 32]) -> [u8; 32] { redis_secret.copy_from_slice(sig.as_ref()); redis_secret } - -#[doc(hidden)] -#[allow(dead_code)] -#[deprecated(note = "use HTTP API instead")] -pub fn insert_account_with_redis_store( - node: &InterledgerNode, - account: AccountDetails, -) -> impl Future { - let redis_secret = generate_redis_secret(&node.secret_seed); - result(node.database_url.clone().into_connection_info()) - .map_err( - |err| error!(target: "interledger-node", "Invalid Redis connection details: {:?}", err), - ) - .and_then(move |redis_url| RedisStoreBuilder::new(redis_url, redis_secret).connect()) - .map_err(|err| error!(target: "interledger-node", "Error connecting to Redis: {:?}", err)) - .and_then(move |store| { - store - .insert_account(account) - .map_err(|_| error!(target: "interledger-node", "Unable to create account")) - .and_then(|account| { - debug!(target: "interledger-node", "Created account: {}", account.id()); - Ok(account.id()) - }) - }) -} diff --git a/crates/ilp-node/tests/redis/btp.rs b/crates/ilp-node/tests/redis/btp.rs index 2ccbaebe8..fbe98fb8b 100644 --- a/crates/ilp-node/tests/redis/btp.rs +++ b/crates/ilp-node/tests/redis/btp.rs @@ -1,16 +1,14 @@ use crate::redis_helpers::*; use crate::test_helpers::*; -use futures::{future::join_all, Future}; use ilp_node::InterledgerNode; use serde_json::{self, json}; -use tokio::runtime::Builder as RuntimeBuilder; -use tracing::error_span; -use tracing_futures::Instrument; +// use tracing::error_span; +// use tracing_futures::Instrument; -#[test] -fn two_nodes_btp() { +#[tokio::test] +async fn two_nodes_btp() { // Nodes 1 and 2 are peers, Node 2 is the parent of Node 2 - install_tracing_subscriber(); + // install_tracing_subscriber(); let context = TestContext::new(); // Each node will use its own DB within the redis instance @@ -24,11 +22,6 @@ fn two_nodes_btp() { let node_b_http = get_open_port(None); let node_b_settlement = get_open_port(None); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let alice_on_a = json!({ "username": "alice_on_a", "asset_code": "XYZ", @@ -87,117 +80,82 @@ fn two_nodes_btp() { })) .expect("Error creating node_b."); - // FIXME This should be fixed after SQL store is implemented. 
- // https://github.com/interledger-rs/interledger-rs/issues/464 - let alice_fut = create_account_on_node(node_a_http, alice_on_a, "admin") - .and_then(move |_| create_account_on_node(node_a_http, b_on_a, "admin")); + node_b.serve().await.unwrap(); + create_account_on_node(node_b_http, a_on_b, "admin") + .await + .unwrap(); + create_account_on_node(node_b_http, bob_on_b, "admin") + .await + .unwrap(); - runtime.spawn( - node_a - .serve() - .instrument(error_span!(target: "interledger", "node_a")), - ); + node_a.serve().await.unwrap(); + create_account_on_node(node_a_http, alice_on_a, "admin") + .await + .unwrap(); + create_account_on_node(node_a_http, b_on_a, "admin") + .await + .unwrap(); - let bob_fut = join_all(vec![ - create_account_on_node(node_b_http, a_on_b, "admin"), - create_account_on_node(node_b_http, bob_on_b, "admin"), - ]); + let get_balances = move || { + futures::future::join_all(vec![ + get_balance("alice_on_a", node_a_http, "admin"), + get_balance("bob_on_b", node_b_http, "admin"), + ]) + }; - runtime.spawn( - node_b - .serve() - .instrument(error_span!(target: "interledger", "node_b")), - ); + send_money_to_username( + node_a_http, + node_b_http, + 1000, + "bob_on_b", + "alice_on_a", + "default account holder", + ) + .await + .unwrap(); - runtime - .block_on( - // Wait for the nodes to spin up - delay(500) - .map_err(|_| panic!("Something strange happened when `delay`")) - .and_then(move |_| { - bob_fut - .and_then(|_| alice_fut) - .and_then(|_| delay(500).map_err(|_| panic!("delay error"))) - }) - .and_then(move |_| { - let send_1_to_2 = send_money_to_username( - node_a_http, - node_b_http, - 1000, - "bob_on_b", - "alice_on_a", - "default account holder", - ); - let send_2_to_1 = send_money_to_username( - node_b_http, - node_a_http, - 2000, - "alice_on_a", - "bob_on_b", - "default account holder", - ); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: -1e-6 + } + ); + assert_eq!( + ret[1], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: 1e-6 + } + ); - let get_balances = move || { - futures::future::join_all(vec![ - get_balance("alice_on_a", node_a_http, "admin"), - get_balance("bob_on_b", node_b_http, "admin"), - ]) - }; + send_money_to_username( + node_b_http, + node_a_http, + 2000, + "alice_on_a", + "bob_on_b", + "default account holder", + ) + .await + .unwrap(); - send_1_to_2 - .map_err(|err| { - eprintln!("Error sending from node 1 to node 2: {:?}", err); - err - }) - .and_then(move |_| { - get_balances().and_then(move |ret| { - assert_eq!( - ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: -1e-6 - } - ); - assert_eq!( - ret[1], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: 1e-6 - } - ); - Ok(()) - }) - }) - .and_then(move |_| { - send_2_to_1.map_err(|err| { - eprintln!("Error sending from node 2 to node 1: {:?}", err); - err - }) - }) - .and_then(move |_| { - get_balances().and_then(move |ret| { - assert_eq!( - ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: 1e-6 - } - ); - assert_eq!( - ret[1], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: -1e-6 - } - ); - Ok(()) - }) - }) - }), - ) - .map_err(|err| { - eprintln!("Error executing tests: {:?}", err); - err - }) - .unwrap(); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: 1e-6 
+ } + ); + assert_eq!( + ret[1], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: -1e-6 + } + ); } diff --git a/crates/ilp-node/tests/redis/exchange_rates.rs b/crates/ilp-node/tests/redis/exchange_rates.rs index 194ceff9a..cd811feb6 100644 --- a/crates/ilp-node/tests/redis/exchange_rates.rs +++ b/crates/ilp-node/tests/redis/exchange_rates.rs @@ -1,26 +1,17 @@ use crate::redis_helpers::*; use crate::test_helpers::*; -use futures::Future; use ilp_node::InterledgerNode; -use reqwest::r#async::Client; +use reqwest::Client; use secrecy::SecretString; use serde_json::{self, json, Value}; use std::env; -use tokio::runtime::Builder as RuntimeBuilder; -use tokio_retry::{strategy::FibonacciBackoff, Retry}; +use std::time::Duration; use tracing::error; -use tracing_subscriber; -#[test] -fn coincap() { - install_tracing_subscriber(); +#[tokio::test] +async fn coincap() { let context = TestContext::new(); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let http_port = get_open_port(None); let node: InterledgerNode = serde_json::from_value(json!({ @@ -33,57 +24,38 @@ fn coincap() { "secret_seed": random_secret(), "route_broadcast_interval": 200, "exchange_rate": { - "poll_interval": 60000, + "poll_interval": 100, "provider": "coincap", }, })) .unwrap(); - runtime.spawn(node.serve()); + node.serve().await.unwrap(); - let get_rates = move || { - Client::new() - .get(&format!("http://localhost:{}/rates", http_port)) - .send() - .map_err(|_| panic!("Error getting rates")) - .and_then(|mut res| res.json().map_err(|_| panic!("Error getting body"))) - .and_then(|body: Value| { - if let Value::Object(obj) = body { - if obj.is_empty() { - error!("Rates are empty"); - return Err(()); - } - assert_eq!( - format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), - "1.0" - ); - assert!(obj.contains_key("EUR")); - assert!(obj.contains_key("JPY")); - assert!(obj.contains_key("BTC")); - assert!(obj.contains_key("ETH")); - assert!(obj.contains_key("XRP")); - } else { - panic!("Not an object"); - } + // Wait a few seconds so our node can poll the API + tokio::time::delay_for(Duration::from_millis(1000)).await; - Ok(()) - }) - }; - - runtime - .block_on( - delay(1000) - .map_err(|_| panic!("Something strange happened")) - .and_then(move |_| { - Retry::spawn(FibonacciBackoff::from_millis(1000).take(5), get_rates) - }), - ) + let ret = Client::new() + .get(&format!("http://localhost:{}/rates", http_port)) + .send() + .await .unwrap(); + let txt = ret.text().await.unwrap(); + let obj: Value = serde_json::from_str(&txt).unwrap(); + + assert_eq!( + format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), + "1.0" + ); + assert!(obj.get("EUR").is_some()); + assert!(obj.get("JPY").is_some()); + assert!(obj.get("BTC").is_some()); + assert!(obj.get("ETH").is_some()); + assert!(obj.get("XRP").is_some()); } // TODO can we disable this with conditional compilation? 
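The rewritten exchange-rate tests above use #[tokio::test] with reqwest 0.10's async client: wait briefly for the node to poll its rate provider, then fetch the /rates endpoint and parse the JSON body. A compact sketch of that request pattern; the URL and port are placeholders, not values used by the tests:

use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // give a hypothetical local node a moment to poll its provider
    tokio::time::delay_for(std::time::Duration::from_millis(1000)).await;
    let rates: Value = reqwest::Client::new()
        .get("http://localhost:7770/rates")
        .send()
        .await?
        .json()
        .await?;
    println!("{}", rates);
    Ok(())
}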
-#[test] -fn cryptocompare() { - tracing_subscriber::fmt::try_init().unwrap_or(()); +#[tokio::test] +async fn cryptocompare() { let context = TestContext::new(); let api_key = env::var("ILP_TEST_CRYPTOCOMPARE_API_KEY"); @@ -93,11 +65,6 @@ fn cryptocompare() { } let api_key = SecretString::new(api_key.unwrap()); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let http_port = get_open_port(Some(3011)); let node: InterledgerNode = serde_json::from_value(json!({ @@ -110,7 +77,7 @@ fn cryptocompare() { "secret_seed": random_secret(), "route_broadcast_interval": 200, "exchange_rate": { - "poll_interval": 60000, + "poll_interval": 100, "provider": { "cryptocompare": api_key }, @@ -118,42 +85,24 @@ fn cryptocompare() { }, })) .unwrap(); - runtime.spawn(node.serve()); + node.serve().await.unwrap(); - let get_rates = move || { - Client::new() - .get(&format!("http://localhost:{}/rates", http_port)) - .send() - .map_err(|_| panic!("Error getting rates")) - .and_then(|mut res| res.json().map_err(|_| panic!("Error getting body"))) - .and_then(|body: Value| { - if let Value::Object(obj) = body { - if obj.is_empty() { - error!("Rates are empty"); - return Err(()); - } - assert_eq!( - format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), - "1.0" - ); - assert!(obj.contains_key("BTC")); - assert!(obj.contains_key("ETH")); - assert!(obj.contains_key("XRP")); - } else { - panic!("Not an object"); - } + // Wait a few seconds so our node can poll the API + tokio::time::delay_for(Duration::from_millis(1000)).await; - Ok(()) - }) - }; - - runtime - .block_on( - delay(1000) - .map_err(|_| panic!("Something strange happened")) - .and_then(move |_| { - Retry::spawn(FibonacciBackoff::from_millis(1000).take(5), get_rates) - }), - ) + let ret = Client::new() + .get(&format!("http://localhost:{}/rates", http_port)) + .send() + .await .unwrap(); + let txt = ret.text().await.unwrap(); + let obj: Value = serde_json::from_str(&txt).unwrap(); + + assert_eq!( + format!("{}", obj.get("USD").expect("Should have USD rate")).as_str(), + "1.0" + ); + assert!(obj.get("BTC").is_some()); + assert!(obj.get("ETH").is_some()); + assert!(obj.get("XRP").is_some()); } diff --git a/crates/ilp-node/tests/redis/prometheus.rs b/crates/ilp-node/tests/redis/prometheus.rs index a9631314b..8ac012e1a 100644 --- a/crates/ilp-node/tests/redis/prometheus.rs +++ b/crates/ilp-node/tests/redis/prometheus.rs @@ -1,15 +1,18 @@ use crate::redis_helpers::*; use crate::test_helpers::*; -use futures::{future::join_all, Future}; +use futures::TryFutureExt; use ilp_node::InterledgerNode; -use reqwest::r#async::Client; +use reqwest::Client; use serde_json::{self, json}; -use tokio::runtime::Builder as RuntimeBuilder; -#[test] -fn prometheus() { +#[tokio::test] +async fn prometheus() { // Nodes 1 and 2 are peers, Node 2 is the parent of Node 2 - install_tracing_subscriber(); + tracing_subscriber::fmt::Subscriber::builder() + .with_timer(tracing_subscriber::fmt::time::ChronoUtc::rfc3339()) + .with_env_filter(tracing_subscriber::filter::EnvFilter::from_default_env()) + .try_init() + .unwrap_or(()); let context = TestContext::new(); // Each node will use its own DB within the redis instance @@ -24,11 +27,6 @@ fn prometheus() { let node_b_settlement = get_open_port(None); let prometheus_port = get_open_port(None); - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let alice_on_a = json!({ "ilp_address": 
"example.node_a.alice", "username": "alice_on_a", @@ -91,102 +89,72 @@ fn prometheus() { })) .unwrap(); - let alice_fut = join_all(vec![ - create_account_on_node(node_a_http, alice_on_a, "admin"), - create_account_on_node(node_a_http, b_on_a, "admin"), - ]); - - runtime.spawn(node_a.serve()); - - let bob_fut = join_all(vec![ - create_account_on_node(node_b_http, a_on_b, "admin"), - create_account_on_node(node_b_http, bob_on_b, "admin"), - ]); - - runtime.spawn(node_b.serve()); - - runtime - .block_on( - // Wait for the nodes to spin up - delay(500) - .map_err(|_| panic!("Something strange happened")) - .and_then(move |_| { - bob_fut - .and_then(|_| alice_fut) - .and_then(|_| delay(500).map_err(|_| panic!("delay error"))) - }) - .and_then(move |_| { - let send_1_to_2 = send_money_to_username( - node_a_http, - node_b_http, - 1000, - "bob_on_b", - "alice_on_a", - "token", - ); - let send_2_to_1 = send_money_to_username( - node_b_http, - node_a_http, - 2000, - "alice_on_a", - "bob_on_b", - "token", - ); - - let check_metrics = move || { - Client::new() - .get(&format!("http://127.0.0.1:{}", prometheus_port)) - .send() - .map_err(|err| eprintln!("Error getting metrics {:?}", err)) - .and_then(|mut res| { - res.text().map_err(|err| { - eprintln!("Response was not a string: {:?}", err) - }) - }) - }; + node_a.serve().await.unwrap(); + node_b.serve().await.unwrap(); - send_1_to_2 - .map_err(|err| { - eprintln!("Error sending from node 1 to node 2: {:?}", err); - err - }) - .and_then(move |_| { - check_metrics().and_then(move |ret| { - assert!(ret.starts_with("# metrics snapshot")); - assert!(ret.contains("requests_incoming_fulfill")); - assert!(ret.contains("requests_incoming_prepare")); - assert!(ret.contains("requests_incoming_reject")); - assert!(ret.contains("requests_incoming_duration")); - assert!(ret.contains("requests_outgoing_fulfill")); - assert!(ret.contains("requests_outgoing_prepare")); - assert!(ret.contains("requests_outgoing_reject")); - assert!(ret.contains("requests_outgoing_duration")); - // TODO check the specific numbers of packets - Ok(()) - }) - }) - .and_then(move |_| { - send_2_to_1.map_err(|err| { - eprintln!("Error sending from node 2 to node 1: {:?}", err); - err - }) - }) - .and_then(move |_| { - check_metrics().and_then(move |ret| { - assert!(ret.starts_with("# metrics snapshot")); - assert!(ret.contains("requests_incoming_fulfill")); - assert!(ret.contains("requests_incoming_prepare")); - assert!(ret.contains("requests_incoming_reject")); - assert!(ret.contains("requests_incoming_duration")); - assert!(ret.contains("requests_outgoing_fulfill")); - assert!(ret.contains("requests_outgoing_prepare")); - assert!(ret.contains("requests_outgoing_reject")); - assert!(ret.contains("requests_outgoing_duration")); - // TODO check the specific numbers of packets - Ok(()) - }) - }) - }), - ) + create_account_on_node(node_b_http, a_on_b, "admin") + .await .unwrap(); + create_account_on_node(node_b_http, bob_on_b, "admin") + .await + .unwrap(); + create_account_on_node(node_a_http, alice_on_a, "admin") + .await + .unwrap(); + create_account_on_node(node_a_http, b_on_a, "admin") + .await + .unwrap(); + + let check_metrics = move || { + Client::new() + .get(&format!("http://127.0.0.1:{}", prometheus_port)) + .send() + .map_err(|err| eprintln!("Error getting metrics {:?}", err)) + .and_then(|res| { + res.text() + .map_err(|err| eprintln!("Response was not a string: {:?}", err)) + }) + }; + + send_money_to_username( + node_a_http, + node_b_http, + 1000, + "bob_on_b", + "alice_on_a", + 
"token", + ) + .await + .unwrap(); + let ret = check_metrics().await.unwrap(); + assert!(ret.starts_with("# metrics snapshot")); + assert!(ret.contains("requests_incoming_fulfill")); + assert!(ret.contains("requests_incoming_prepare")); + assert!(ret.contains("requests_incoming_reject")); + assert!(ret.contains("requests_incoming_duration")); + assert!(ret.contains("requests_outgoing_fulfill")); + assert!(ret.contains("requests_outgoing_prepare")); + assert!(ret.contains("requests_outgoing_reject")); + assert!(ret.contains("requests_outgoing_duration")); + + send_money_to_username( + node_b_http, + node_a_http, + 2000, + "alice_on_a", + "bob_on_b", + "token", + ) + .await + .unwrap(); + let ret = check_metrics().await.unwrap(); + assert!(ret.starts_with("# metrics snapshot")); + assert!(ret.contains("requests_incoming_fulfill")); + assert!(ret.contains("requests_incoming_prepare")); + assert!(ret.contains("requests_incoming_reject")); + assert!(ret.contains("requests_incoming_duration")); + assert!(ret.contains("requests_outgoing_fulfill")); + assert!(ret.contains("requests_outgoing_prepare")); + assert!(ret.contains("requests_outgoing_reject")); + assert!(ret.contains("requests_outgoing_duration")); } diff --git a/crates/ilp-node/tests/redis/redis_helpers.rs b/crates/ilp-node/tests/redis/redis_helpers.rs index 0c61f4475..b541650fd 100644 --- a/crates/ilp-node/tests/redis/redis_helpers.rs +++ b/crates/ilp-node/tests/redis/redis_helpers.rs @@ -1,15 +1,14 @@ // Copied from https://github.com/mitsuhiko/redis-rs/blob/9a1777e8a90c82c315a481cdf66beb7d69e681a2/tests/support/mod.rs #![allow(dead_code)] -use futures::Future; +use futures::TryFutureExt; use redis_crate::{self as redis, ConnectionAddr, ConnectionInfo, RedisError}; use std::env; use std::fs; use std::path::PathBuf; use std::process; use std::thread::sleep; -use std::time::{Duration, Instant}; -use tokio::timer::Delay; +use std::time::Duration; #[allow(unused)] pub fn connection_info_to_string(info: ConnectionInfo) -> String { @@ -40,8 +39,8 @@ pub fn get_open_port(try_port: Option) -> u16 { panic!("Cannot find open port!"); } -pub fn delay(ms: u64) -> impl Future { - Delay::new(Instant::now() + Duration::from_millis(ms)).map_err(|err| panic!(err)) +pub async fn delay(ms: u64) { + tokio::time::delay_for(Duration::from_millis(ms)).await; } #[derive(PartialEq)] @@ -190,20 +189,21 @@ impl TestContext { self.client.get_connection().unwrap() } - pub fn async_connection( - &self, - ) -> impl Future { - self.client.get_async_connection() + pub async fn async_connection(&self) -> Result { + self.client + .get_async_connection() + .map_err(|err| panic!(err)) + .await } pub fn stop_server(&mut self) { self.server.stop(); } - pub fn shared_async_connection( + pub async fn shared_async_connection( &self, - ) -> impl Future { - self.client.get_shared_async_connection() + ) -> Result { + self.client.get_multiplexed_tokio_connection().await } } diff --git a/crates/ilp-node/tests/redis/redis_tests.rs b/crates/ilp-node/tests/redis/redis_tests.rs index 0ec67ff68..e1db4d529 100644 --- a/crates/ilp-node/tests/redis/redis_tests.rs +++ b/crates/ilp-node/tests/redis/redis_tests.rs @@ -1,7 +1,11 @@ +#![type_length_limit = "5000000"] mod btp; mod exchange_rates; -mod prometheus; mod three_nodes; +// Only run prometheus tests if the monitoring feature is turned on +#[cfg(feature = "monitoring")] +mod prometheus; + mod redis_helpers; mod test_helpers; diff --git a/crates/ilp-node/tests/redis/test_helpers.rs b/crates/ilp-node/tests/redis/test_helpers.rs index 
8f2b0e00a..aea1407c9 100644 --- a/crates/ilp-node/tests/redis/test_helpers.rs +++ b/crates/ilp-node/tests/redis/test_helpers.rs @@ -1,4 +1,4 @@ -use futures::{stream::Stream, Future}; +use futures::TryFutureExt; use hex; use interledger::stream::StreamDelivery; use interledger::{packet::Address, service::Account as AccountTrait, store::account::Account}; @@ -8,17 +8,9 @@ use serde_json::json; use std::collections::HashMap; use std::fmt::{Debug, Display}; use std::str; -use tracing_subscriber; +// use tracing_subscriber; use uuid::Uuid; -pub fn install_tracing_subscriber() { - tracing_subscriber::fmt::Subscriber::builder() - .with_timer(tracing_subscriber::fmt::time::ChronoUtc::rfc3339()) - .with_env_filter(tracing_subscriber::filter::EnvFilter::from_default_env()) - .try_init() - .unwrap_or(()); -} - #[allow(unused)] pub fn random_secret() -> String { let mut bytes: [u8; 32] = [0; 32]; @@ -33,56 +25,58 @@ pub struct BalanceData { } #[allow(unused)] -pub fn create_account_on_node( +pub async fn create_account_on_node( api_port: u16, data: T, auth: &str, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .post(&format!("http://localhost:{}/accounts", api_port)) .header("Content-Type", "application/json") .header("Authorization", format!("Bearer {}", auth)) .json(&data) .send() - .and_then(move |res| res.error_for_status()) - .and_then(move |res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error creating account on node: {:?}", err); - }) - .and_then(move |chunk| Ok(str::from_utf8(&chunk).unwrap().to_string())) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + + Ok(res.json::().map_err(|_| ()).await.unwrap()) } #[allow(unused)] -pub fn create_account_on_engine( +pub async fn create_account_on_engine( engine_port: u16, account_id: T, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .post(&format!("http://localhost:{}/accounts", engine_port)) .header("Content-Type", "application/json") .json(&json!({ "id": account_id })) .send() - .and_then(move |res| res.error_for_status()) - .and_then(move |res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error creating account: {:?}", err); - }) - .and_then(move |chunk| Ok(str::from_utf8(&chunk).unwrap().to_string())) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + + let data: bytes05::Bytes = res.bytes().map_err(|_| ()).await?; + + Ok(str::from_utf8(&data).unwrap().to_string()) } #[allow(unused)] -pub fn send_money_to_username( +pub async fn send_money_to_username( from_port: u16, to_port: u16, amount: u64, to_username: T, from_username: &str, from_auth: &str, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .post(&format!( "http://localhost:{}/accounts/{}/payments", from_port, from_username @@ -93,36 +87,27 @@ pub fn send_money_to_username( "source_amount": amount, })) .send() - .and_then(|res| res.error_for_status()) - .and_then(|res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error sending SPSP payment: {:?}", err); - }) - .and_then(move |body| { - let ret: StreamDelivery = serde_json::from_slice(&body).unwrap(); - Ok(ret) - }) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + 
Ok(res.json::().await.unwrap()) } #[allow(unused)] -pub fn get_all_accounts( - node_port: u16, - admin_token: &str, -) -> impl Future, Error = ()> { - let client = reqwest::r#async::Client::new(); - client +pub async fn get_all_accounts(node_port: u16, admin_token: &str) -> Result, ()> { + let client = reqwest::Client::new(); + let res = client .get(&format!("http://localhost:{}/accounts", node_port)) .header("Authorization", format!("Bearer {}", admin_token)) .send() - .and_then(|res| res.error_for_status()) - .and_then(|res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error getting account data: {:?}", err); - }) - .and_then(move |body| { - let ret: Vec = serde_json::from_slice(&body).unwrap(); - Ok(ret) - }) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + let body: bytes05::Bytes = res.bytes().map_err(|_| ()).await?; + let ret: Vec = serde_json::from_slice(&body).unwrap(); + Ok(ret) } #[allow(unused)] @@ -135,26 +120,24 @@ pub fn accounts_to_ids(accounts: Vec) -> HashMap { } #[allow(unused)] -pub fn get_balance( +pub async fn get_balance( account_id: T, node_port: u16, admin_token: &str, -) -> impl Future { - let client = reqwest::r#async::Client::new(); - client +) -> Result { + let client = reqwest::Client::new(); + let res = client .get(&format!( "http://localhost:{}/accounts/{}/balance", node_port, account_id )) .header("Authorization", format!("Bearer {}", admin_token)) .send() - .and_then(|res| res.error_for_status()) - .and_then(|res| res.into_body().concat2()) - .map_err(|err| { - eprintln!("Error getting account data: {:?}", err); - }) - .and_then(|body| { - let ret: BalanceData = serde_json::from_slice(&body).unwrap(); - Ok(ret) - }) + .map_err(|_| ()) + .await?; + + let res = res.error_for_status().map_err(|_| ())?; + let body: bytes05::Bytes = res.bytes().map_err(|_| ()).await?; + let ret: BalanceData = serde_json::from_slice(&body).unwrap(); + Ok(ret) } diff --git a/crates/ilp-node/tests/redis/three_nodes.rs b/crates/ilp-node/tests/redis/three_nodes.rs index caf90cb8d..95f3f3d92 100644 --- a/crates/ilp-node/tests/redis/three_nodes.rs +++ b/crates/ilp-node/tests/redis/three_nodes.rs @@ -1,21 +1,13 @@ use crate::redis_helpers::*; use crate::test_helpers::*; -use futures::{future::join_all, stream::*, sync::mpsc, Future}; use ilp_node::InterledgerNode; use interledger::packet::Address; -use interledger::stream::StreamDelivery; use serde_json::json; use std::str::FromStr; -use tokio::runtime::Builder as RuntimeBuilder; -use tracing::{debug, error_span}; -use tracing_futures::Instrument; -const LOG_TARGET: &str = "interledger-tests-three-nodes"; - -#[test] -fn three_nodes() { +#[tokio::test] +async fn three_nodes() { // Nodes 1 and 2 are peers, Node 2 is the parent of Node 3 - install_tracing_subscriber(); let context = TestContext::new(); // Each node will use its own DB within the redis instance @@ -32,12 +24,6 @@ fn three_nodes() { let node2_settlement = get_open_port(None); let node3_http = get_open_port(None); let node3_settlement = get_open_port(None); - - let mut runtime = RuntimeBuilder::new() - .panic_handler(|err| std::panic::resume_unwind(err)) - .build() - .unwrap(); - let alice_on_alice = json!({ "ilp_address": "example.alice", "username": "alice_on_a", @@ -138,213 +124,154 @@ fn three_nodes() { })) .expect("Error creating node3."); - let (finish_sender, finish_receiver) = mpsc::channel(0); + node1.serve().await.unwrap(); + create_account_on_node(node1_http, alice_on_alice, "admin") + .await + .unwrap(); + 
create_account_on_node(node1_http, bob_on_alice, "admin") + .await + .unwrap(); - let alice_fut = join_all(vec![ - create_account_on_node(node1_http, alice_on_alice, "admin"), - create_account_on_node(node1_http, bob_on_alice, "admin"), - ]); + node2.serve().await.unwrap(); + create_account_on_node(node2_http, alice_on_bob, "admin") + .await + .unwrap(); + create_account_on_node(node2_http, charlie_on_bob, "admin") + .await + .unwrap(); + // Also set exchange rates + let client = reqwest::Client::new(); + client + .put(&format!("http://localhost:{}/rates", node2_http)) + .header("Authorization", "Bearer admin") + .json(&json!({"ABC": 1, "XYZ": 2})) + .send() + .await + .unwrap(); - let mut node1_finish_sender = finish_sender.clone(); - runtime.spawn( - node1 - .serve() - .and_then(move |_| alice_fut) - .and_then(move |_| { - node1_finish_sender - .try_send(1) - .expect("Could not send message from node_1"); - Ok(()) - }) - .instrument(error_span!(target: "interledger", "node1")), - ); + node3.serve().await.unwrap(); + create_account_on_node(node3_http, charlie_on_charlie, "admin") + .await + .unwrap(); + create_account_on_node(node3_http, bob_on_charlie, "admin") + .await + .unwrap(); - let bob_fut = join_all(vec![ - create_account_on_node(node2_http, alice_on_bob, "admin"), - create_account_on_node(node2_http, charlie_on_bob, "admin"), - ]); + delay(1000).await; - let mut node2_finish_sender = finish_sender; - runtime.spawn( - node2 - .serve() - .and_then(move |_| bob_fut) - .and_then(move |_| { - let client = reqwest::r#async::Client::new(); - client - .put(&format!("http://localhost:{}/rates", node2_http)) - .header("Authorization", "Bearer admin") - .json(&json!({"ABC": 1, "XYZ": 2})) - .send() - .map_err(|err| panic!(err)) - .and_then(move |res| { - res.error_for_status() - .expect("Error setting exchange rates"); - node2_finish_sender - .try_send(2) - .expect("Could not send message from node_2"); - Ok(()) - }) - }) - .instrument(error_span!(target: "interledger", "node2")), - ); + let get_balances = move || { + futures::future::join_all(vec![ + get_balance("alice_on_a", node1_http, "admin"), + get_balance("charlie_on_b", node2_http, "admin"), + get_balance("charlie_on_c", node3_http, "admin"), + ]) + }; - // We execute the futures one after the other to avoid race conditions where - // Bob gets added before the node's main account - let charlie_fut = create_account_on_node(node3_http, charlie_on_charlie, "admin") - .and_then(move |_| create_account_on_node(node3_http, bob_on_charlie, "admin")); + // Node 1 sends 1000 to Node 3. However, Node1's scale is 9, + // while Node 3's scale is 6. This means that Node 3 will + // see 1000x less. In addition, the conversion rate is 2:1 + // for 3's asset, so he will receive 2 total. 
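Spelling out the arithmetic in that comment: 1000 units at scale 9 are 1000 * 10^-9 = 1e-6 XYZ in base units; at the configured 2:1 XYZ->ABC rate that is 2e-6 ABC, which at Node 3's scale of 6 is 2 whole units, matching the `delivered_amount` and the 2e-6 balances asserted below. A tiny self-contained check of that conversion (the `convert` helper is illustrative, not part of the node's API):

    /// Converts `amount` from one asset scale to another, applying an exchange rate.
    fn convert(amount: u64, from_scale: i32, to_scale: i32, rate: f64) -> f64 {
        let base_units = amount as f64 / 10f64.powi(from_scale); // normalize to the base unit
        base_units * rate * 10f64.powi(to_scale) // express in the destination scale
    }

    fn main() {
        // 1000 units of XYZ (scale 9) delivered as ABC (scale 6) at a 2:1 rate:
        let delivered = convert(1000, 9, 6, 2.0);
        assert!((delivered - 2.0).abs() < 1e-9);
    }
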
+ let receipt = send_money_to_username( + node1_http, + node3_http, + 1000, + "charlie_on_c", + "alice_on_a", + "default account holder", + ) + .await + .unwrap(); - runtime - .block_on( - node3 - .serve() - .and_then(move |_| finish_receiver.collect()) - .and_then(move |messages| { - debug!( - target: LOG_TARGET, - "Received finish messages: {:?}", messages - ); - charlie_fut - }) - .instrument(error_span!(target: "interledger", "node3")) - // we wait some time after the node is up so that we get the - // necessary routes from bob - .and_then(move |_| { - delay(1000).map_err(|_| panic!("Something strange happened when `delay`")) - }) - .and_then(move |_| { - let send_1_to_3 = send_money_to_username( - node1_http, - node3_http, - 1000, - "charlie_on_c", - "alice_on_a", - "default account holder", - ); - let send_3_to_1 = send_money_to_username( - node3_http, - node1_http, - 1000, - "alice_on_a", - "charlie_on_c", - "default account holder", - ); + assert_eq!( + receipt.from, + Address::from_str("example.alice").unwrap(), + "Payment receipt incorrect (1)" + ); + assert!(receipt + .to + .to_string() + .starts_with("example.bob.charlie_on_b.charlie_on_c.")); + assert_eq!(receipt.sent_asset_code, "XYZ"); + assert_eq!(receipt.sent_asset_scale, 9); + assert_eq!(receipt.sent_amount, 1000); + assert_eq!(receipt.delivered_asset_code.unwrap(), "ABC"); + assert_eq!(receipt.delivered_amount, 2); + assert_eq!(receipt.delivered_asset_scale.unwrap(), 6); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + // -1000 divided by asset scale 9 + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: -1e-6 + } + ); + // 2 divided by asset scale 6 + assert_eq!( + ret[1], + BalanceData { + asset_code: "ABC".to_owned(), + balance: 2e-6 + } + ); + // 2 divided by asset scale 6 + assert_eq!( + ret[2], + BalanceData { + asset_code: "ABC".to_owned(), + balance: 2e-6 + } + ); - let get_balances = move || { - futures::future::join_all(vec![ - get_balance("alice_on_a", node1_http, "admin"), - get_balance("charlie_on_b", node2_http, "admin"), - get_balance("charlie_on_c", node3_http, "admin"), - ]) - }; + // Charlie sends to Alice + let receipt = send_money_to_username( + node3_http, + node1_http, + 1000, + "alice_on_a", + "charlie_on_c", + "default account holder", + ) + .await + .unwrap(); - // Node 1 sends 1000 to Node 3. However, Node1's scale is 9, - // while Node 3's scale is 6. This means that Node 3 will - // see 1000x less. In addition, the conversion rate is 2:1 - // for 3's asset, so he will receive 2 total. 
- send_1_to_3 - .map_err(|err| { - eprintln!("Error sending from node 1 to node 3: {:?}", err); - err - }) - .and_then(move |receipt: StreamDelivery| { - debug!(target: LOG_TARGET, "send_1_to_3 receipt: {:?}", receipt); - assert_eq!( - receipt.from, - Address::from_str("example.alice").unwrap(), - "Payment receipt incorrect (1)" - ); - assert!(receipt - .to - .to_string() - .starts_with("example.bob.charlie_on_b.charlie_on_c.")); - assert_eq!(receipt.sent_asset_code, "XYZ"); - assert_eq!(receipt.sent_asset_scale, 9); - assert_eq!(receipt.sent_amount, 1000); - assert_eq!(receipt.delivered_asset_code.unwrap(), "ABC"); - assert_eq!(receipt.delivered_amount, 2); - assert_eq!(receipt.delivered_asset_scale.unwrap(), 6); - get_balances().and_then(move |ret| { - // -1000 divided by asset scale 9 - assert_eq!( - ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: -1e-6 - } - ); - // 2 divided by asset scale 6 - assert_eq!( - ret[1], - BalanceData { - asset_code: "ABC".to_owned(), - balance: 2e-6 - } - ); - // 2 divided by asset scale 6 - assert_eq!( - ret[2], - BalanceData { - asset_code: "ABC".to_owned(), - balance: 2e-6 - } - ); - Ok(()) - }) - }) - .and_then(move |_| { - send_3_to_1.map_err(|err| { - eprintln!("Error sending from node 3 to node 1: {:?}", err); - err - }) - }) - .and_then(move |receipt| { - debug!(target: LOG_TARGET, "send_3_to_1 receipt: {:?}", receipt); - assert_eq!( - receipt.from, - Address::from_str("example.bob.charlie_on_b.charlie_on_c").unwrap(), - "Payment receipt incorrect (2)" - ); - assert!(receipt.to.to_string().starts_with("example.alice")); - assert_eq!(receipt.sent_asset_code, "ABC"); - assert_eq!(receipt.sent_asset_scale, 6); - assert_eq!(receipt.sent_amount, 1000); - assert_eq!(receipt.delivered_asset_code.unwrap(), "XYZ"); - assert_eq!(receipt.delivered_amount, 500_000); - assert_eq!(receipt.delivered_asset_scale.unwrap(), 9); - get_balances().and_then(move |ret| { - // 499,000 divided by asset scale 9 - assert_eq!( - ret[0], - BalanceData { - asset_code: "XYZ".to_owned(), - balance: 499e-6 - } - ); - // -998 divided by asset scale 6 - assert_eq!( - ret[1], - BalanceData { - asset_code: "ABC".to_owned(), - balance: -998e-6 - } - ); - // -998 divided by asset scale 6 - assert_eq!( - ret[2], - BalanceData { - asset_code: "ABC".to_owned(), - balance: -998e-6 - } - ); - Ok(()) - }) - }) - }), - ) - .map_err(|err| { - eprintln!("Error executing tests: {:?}", err); - err - }) - .unwrap(); + assert_eq!( + receipt.from, + Address::from_str("example.bob.charlie_on_b.charlie_on_c").unwrap(), + "Payment receipt incorrect (2)" + ); + assert!(receipt.to.to_string().starts_with("example.alice")); + assert_eq!(receipt.sent_asset_code, "ABC"); + assert_eq!(receipt.sent_asset_scale, 6); + assert_eq!(receipt.sent_amount, 1000); + assert_eq!(receipt.delivered_asset_code.unwrap(), "XYZ"); + assert_eq!(receipt.delivered_amount, 500_000); + assert_eq!(receipt.delivered_asset_scale.unwrap(), 9); + let ret = get_balances().await; + let ret: Vec<_> = ret.into_iter().map(|r| r.unwrap()).collect(); + // 499,000 divided by asset scale 9 + assert_eq!( + ret[0], + BalanceData { + asset_code: "XYZ".to_owned(), + balance: 499e-6 + } + ); + // -998 divided by asset scale 6 + assert_eq!( + ret[1], + BalanceData { + asset_code: "ABC".to_owned(), + balance: -998e-6 + } + ); + // -998 divided by asset scale 6 + assert_eq!( + ret[2], + BalanceData { + asset_code: "ABC".to_owned(), + balance: -998e-6 + } + ); } diff --git a/crates/interledger-api/Cargo.toml 
b/crates/interledger-api/Cargo.toml index 6d32c8850..d692a50b6 100644 --- a/crates/interledger-api/Cargo.toml +++ b/crates/interledger-api/Cargo.toml @@ -8,10 +8,10 @@ edition = "2018" repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] -bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } -futures-retry = { version = "0.3.3", default-features = false } -http = { version = "0.1.18", default-features = false } +bytes = { version = "0.5", default-features = false } +futures = { version = "0.3.1", default-features = false } +futures-retry = { version = "0.4", default-features = false } +http = { version = "0.2", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-http = { path = "../interledger-http", version = "^0.4.0", default-features = false } interledger-ildcp = { path = "../interledger-ildcp", version = "^0.4.0", default-features = false } @@ -26,13 +26,17 @@ interledger-btp = { path = "../interledger-btp", version = "^0.4.0", default-fea log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } -serde_path_to_error = { version = "0.1", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +serde_path_to_error = { version = "0.1.2", default-features = false } +reqwest = { version = "0.10", default-features = false, features = ["default-tls", "json"] } url = { version = "2.1.0", default-features = false, features = ["serde"] } uuid = { version = "0.8.1", default-features = false} -warp = { version = "0.1.20", default-features = false } -secrecy = { version = "0.5.2", default-features = false, features = ["serde"] } +warp = { version = "0.2", default-features = false } +secrecy = { version = "0.6", default-features = false, features = ["serde"] } lazy_static = "1.4.0" +async-trait = "0.1.22" + +[dev-dependencies] +tokio = { version = "0.2.9", features = ["rt-core", "macros"] } [badges] circle-ci = { repository = "interledger-rs/interledger-rs" } diff --git a/crates/interledger-api/src/http_retry.rs b/crates/interledger-api/src/http_retry.rs index 8fb35f03f..83f6be434 100644 --- a/crates/interledger-api/src/http_retry.rs +++ b/crates/interledger-api/src/http_retry.rs @@ -1,9 +1,9 @@ // Adapted from the futures-retry example: https://gitlab.com/mexus/futures-retry/blob/master/examples/tcp-client-complex.rs -use futures::future::Future; +use futures::TryFutureExt; use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy}; use http::StatusCode; use log::trace; -use reqwest::r#async::Client as HttpClient; +use reqwest::Client as HttpClient; use serde_json::json; use std::{default::Default, fmt::Display, time::Duration}; use url::Url; @@ -28,12 +28,13 @@ impl Client { } } - pub fn create_engine_account( + pub async fn create_engine_account( &self, engine_url: Url, id: T, - ) -> impl Future { + ) -> Result { let mut se_url = engine_url.clone(); + let id: String = id.to_string(); se_url .path_segments_mut() .expect("Invalid settlement engine URL") @@ -46,26 +47,24 @@ impl Client { // The actual HTTP request which gets made to the engine let client = self.client.clone(); - let create_settlement_engine_account = move || { - client - .post(se_url.as_ref()) - .json(&json!({"id" : id.to_string()})) - .send() - .and_then(move 
|response| { - // If the account is not found on the peer's connector, the - // retry logic will not get triggered. When the counterparty - // tries to add the account, they will complete the handshake. - Ok(response.status()) - }) - }; - FutureRetry::new( - create_settlement_engine_account, - IoHandler::new( - self.max_retries, - format!("[Engine: {}, Account: {}]", engine_url, id), - ), + // If the account is not found on the peer's connector, the + // retry logic will not get triggered. When the counterparty + // tries to add the account, they will complete the handshake. + + let msg = format!("[Engine: {}, Account: {}]", engine_url, id); + let res = FutureRetry::new( + move || { + client + .post(se_url.as_ref()) + .json(&json!({ "id": id })) + .send() + .map_ok(move |response| response.status()) + }, + IoHandler::new(self.max_retries, msg), ) + .await?; + Ok(res) } } @@ -111,12 +110,22 @@ where self.max_attempts ); - if e.is_client_error() { - // do not retry 4xx - RetryPolicy::ForwardError(e) - } else if e.is_timeout() || e.is_server_error() { - // Retry timeouts and 5xx every 5 seconds + // TODO: Should we make this policy more sophisticated? + + // Retry timeouts every 5s + if e.is_timeout() { RetryPolicy::WaitRetry(Duration::from_secs(5)) + } else if let Some(status) = e.status() { + if status.is_client_error() { + // do not retry 4xx + RetryPolicy::ForwardError(e) + } else if status.is_server_error() { + // Retry 5xx every 5 seconds + RetryPolicy::WaitRetry(Duration::from_secs(5)) + } else { + // Otherwise just retry every second + RetryPolicy::WaitRetry(Duration::from_secs(1)) + } } else { // Retry other errors slightly more frequently since they may be // related to the engine not having started yet diff --git a/crates/interledger-api/src/lib.rs b/crates/interledger-api/src/lib.rs index 7e85a47af..73fbd2270 100644 --- a/crates/interledger-api/src/lib.rs +++ b/crates/interledger-api/src/lib.rs @@ -1,5 +1,7 @@ +use async_trait::async_trait; use bytes::Bytes; -use futures::Future; +use interledger_btp::{BtpAccount, BtpOutgoingService}; +use interledger_ccp::CcpRoutingAccount; use interledger_http::{HttpAccount, HttpStore}; use interledger_packet::Address; use interledger_router::RouterStore; @@ -7,17 +9,15 @@ use interledger_service::{Account, AddressStore, IncomingService, OutgoingServic use interledger_service_util::{BalanceStore, ExchangeRateStore}; use interledger_settlement::core::types::{SettlementAccount, SettlementStore}; use interledger_stream::StreamNotificationsStore; +use secrecy::SecretString; use serde::{de, Deserialize, Serialize}; use std::{boxed::*, collections::HashMap, fmt::Display, net::SocketAddr, str::FromStr}; +use url::Url; use uuid::Uuid; use warp::{self, Filter}; -mod routes; -use interledger_btp::{BtpAccount, BtpOutgoingService}; -use interledger_ccp::CcpRoutingAccount; -use secrecy::SecretString; -use url::Url; pub(crate) mod http_retry; +mod routes; // This enum and the following functions are used to allow clients to send either // numbers or strings and have them be properly deserialized into the appropriate @@ -71,52 +71,57 @@ where // One argument against doing that is that the NodeStore allows admin-only // modifications to the values, whereas many of the other traits mostly // read from the configured values. +#[async_trait] pub trait NodeStore: AddressStore + Clone + Send + Sync + 'static { type Account: Account; - fn insert_account( - &self, - account: AccountDetails, - ) -> Box + Send>; + /// Inserts an account to the store. 
Generates a UUID and returns the full Account object. + async fn insert_account(&self, account: AccountDetails) -> Result; - fn delete_account(&self, id: Uuid) -> Box + Send>; + /// Deletes the account corresponding to the provided id and returns it + async fn delete_account(&self, id: Uuid) -> Result; - fn update_account( - &self, - id: Uuid, - account: AccountDetails, - ) -> Box + Send>; + /// Overwrites the account corresponding to the provided id with the provided details + async fn update_account(&self, id: Uuid, account: AccountDetails) -> Result; - fn modify_account_settings( + /// Modifies the account corresponding to the provided id with the provided settings. + /// `modify_account_settings` allows **users** to update their account settings with a set of + /// limited fields of account details. However `update_account` allows **admins** to fully + /// update account settings. + async fn modify_account_settings( &self, id: Uuid, settings: AccountSettings, - ) -> Box + Send>; + ) -> Result; // TODO limit the number of results and page through them - fn get_all_accounts(&self) -> Box, Error = ()> + Send>; + /// Gets all stored accounts + async fn get_all_accounts(&self) -> Result, ()>; - fn set_static_routes(&self, routes: R) -> Box + Send> + /// Sets the static routes for routing + async fn set_static_routes(&self, routes: R) -> Result<(), ()> where - R: IntoIterator; + // The 'async_trait lifetime is used after recommendation here: + // https://github.com/dtolnay/async-trait/issues/8#issuecomment-514812245 + R: IntoIterator + Send + 'async_trait; - fn set_static_route( - &self, - prefix: String, - account_id: Uuid, - ) -> Box + Send>; + /// Sets a single static route + async fn set_static_route(&self, prefix: String, account_id: Uuid) -> Result<(), ()>; - fn set_default_route(&self, account_id: Uuid) -> Box + Send>; + /// Sets the default route ("") to be the provided account id + /// (acts as a catch-all route if all other routes don't match) + async fn set_default_route(&self, account_id: Uuid) -> Result<(), ()>; - fn set_settlement_engines( + /// Sets the default settlement engines to be used for the provided asset codes + async fn set_settlement_engines( &self, - asset_to_url_map: impl IntoIterator, - ) -> Box + Send>; + // The 'async_trait lifetime is used after recommendation here: + // https://github.com/dtolnay/async-trait/issues/8#issuecomment-514812245 + asset_to_url_map: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()>; - fn get_asset_settlement_engine( - &self, - asset_code: &str, - ) -> Box, Error = ()> + Send>; + /// Gets the default settlement engine for the provided asset code + async fn get_asset_settlement_engine(&self, asset_code: &str) -> Result, ()>; } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -131,17 +136,27 @@ pub struct ExchangeRates( /// their HTTP/BTP endpoints, since they may change their network configuration. #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct AccountSettings { + /// The account's incoming ILP over HTTP token. pub ilp_over_http_incoming_token: Option, + /// The account's incoming ILP over BTP token. pub ilp_over_btp_incoming_token: Option, + /// The account's outgoing ILP over HTTP token pub ilp_over_http_outgoing_token: Option, + /// The account's outgoing ILP over BTP token. + /// This must match the ILP over BTP incoming token on the peer's node if exchanging + /// packets with that peer. 
pub ilp_over_btp_outgoing_token: Option, + /// The account's ILP over HTTP URL (this is where packets are sent over HTTP from your node) pub ilp_over_http_url: Option, + /// The account's ILP over BTP URL (this is where packets are sent over WebSockets from your node) pub ilp_over_btp_url: Option, + /// The threshold after which the balance service will trigger a settlement #[serde(default, deserialize_with = "optional_number_or_string")] pub settle_threshold: Option, - // Note that this is intentionally an unsigned integer because users should - // not be able to set the settle_to value to be negative (meaning the node - // would pre-fund with the user) + /// The amount which the balance service will attempt to settle down to. + /// Note that this is intentionally an unsigned integer because users should + /// not be able to set the settle_to value to be negative (meaning the node + /// would pre-fund with the user) #[serde(default, deserialize_with = "optional_number_or_string")] pub settle_to: Option, } @@ -159,45 +174,82 @@ pub struct EncryptedAccountSettings { pub ilp_over_http_url: Option, pub ilp_over_btp_url: Option, #[serde(default, deserialize_with = "optional_number_or_string")] + /// The threshold after which the balance service will trigger a settlement pub settle_threshold: Option, #[serde(default, deserialize_with = "optional_number_or_string")] + /// The amount which the balance service will attempt to settle down to pub settle_to: Option, } /// The Account type for the RedisStore. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AccountDetails { + /// The account's Interledger Protocol address. + /// If none is provided, the node should generate one pub ilp_address: Option
, + /// The account's username pub username: Username, + /// The account's currency pub asset_code: String, #[serde(deserialize_with = "number_or_string")] + /// The account's asset scale pub asset_scale: u8, #[serde(default = "u64::max_value", deserialize_with = "number_or_string")] + /// The max amount per packet which can be routed for this account pub max_packet_amount: u64, + /// The minimum balance this account can have (consider this as a credit/trust limit) #[serde(default, deserialize_with = "optional_number_or_string")] pub min_balance: Option, + /// The account's ILP over HTTP URL (this is where packets are sent over HTTP from your node) pub ilp_over_http_url: Option, + /// The account's API and incoming ILP over HTTP token. + /// This must match the ILP over HTTP outgoing token on the peer's node if receiving + /// packets from that peer + // TODO: The incoming token is used for both ILP over HTTP, and for authorizing actions from the HTTP API. + // Should we add 1 more token, for more granular permissioning? pub ilp_over_http_incoming_token: Option, + /// The account's outgoing ILP over HTTP token + /// This must match the ILP over HTTP incoming token on the peer's node if sending + /// packets to that peer pub ilp_over_http_outgoing_token: Option, + /// The account's ILP over BTP URL (this is where packets are sent over WebSockets from your node) pub ilp_over_btp_url: Option, + /// The account's outgoing ILP over BTP token. + /// This must match the ILP over BTP incoming token on the peer's node if exchanging + /// packets with that peer. pub ilp_over_btp_outgoing_token: Option, + /// The account's incoming ILP over BTP token. + /// This must match the ILP over BTP outgoing token on the peer's node if exchanging + /// packets with that peer. pub ilp_over_btp_incoming_token: Option, + /// The threshold after which the balance service will trigger a settlement #[serde(default, deserialize_with = "optional_number_or_string")] pub settle_threshold: Option, + /// The amount which the balance service will attempt to settle down to #[serde(default, deserialize_with = "optional_number_or_string")] pub settle_to: Option, + /// The routing relation of the account pub routing_relation: Option, + /// The round trip time of the account (should be set depending on how + /// well the network connectivity of the account and the node is) #[serde(default, deserialize_with = "optional_number_or_string")] pub round_trip_time: Option, + /// The maximum amount the account can send per minute #[serde(default, deserialize_with = "optional_number_or_string")] pub amount_per_minute_limit: Option, + /// The limit of packets the account can send per minute #[serde(default, deserialize_with = "optional_number_or_string")] pub packets_per_minute_limit: Option, + /// The account's settlement engine URL. 
If a global engine url is configured + /// for the account's asset code, that will be used instead (even if the account is + /// configured with a specific one) pub settlement_engine_url: Option, } pub struct NodeApi { store: S, + /// The admin's API token, used to make admin-only changes + // TODO: Make this a SecretString admin_api_token: String, default_spsp_account: Option, incoming_handler: I, @@ -207,6 +259,7 @@ pub struct NodeApi { // The BTP service is included here so that we can add a new client // connection when an account is added with BTP details btp: BtpOutgoingService, + /// Server secret used to instantiate SPSP/Stream connections server_secret: Bytes, node_version: Option, } @@ -253,16 +306,21 @@ where } } + /// Sets the default SPSP account. When SPSP payments are sent to the root domain, + /// the payment pointer is resolved to /.well-known/pay. This value determines + /// which account those payments will be sent to. pub fn default_spsp_account(&mut self, username: Username) -> &mut Self { self.default_spsp_account = Some(username); self } + /// Sets the node version pub fn node_version(&mut self, version: String) -> &mut Self { self.node_version = Some(version); self } + /// Returns a Warp Filter which exposes the accounts and admin APIs pub fn into_warp_filter(self) -> warp::filters::BoxedFilter<(impl warp::Reply,)> { routes::accounts_api( self.server_secret, @@ -281,8 +339,9 @@ where .boxed() } - pub fn bind(self, addr: SocketAddr) -> impl Future { - warp::serve(self.into_warp_filter()).bind(addr) + /// Serves the API at the provided address + pub async fn bind(self, addr: SocketAddr) { + warp::serve(self.into_warp_filter()).bind(addr).await } } diff --git a/crates/interledger-api/src/routes/accounts.rs b/crates/interledger-api/src/routes/accounts.rs index bca63e768..85f5d5361 100644 --- a/crates/interledger-api/src/routes/accounts.rs +++ b/crates/interledger-api/src/routes/accounts.rs @@ -1,9 +1,6 @@ use crate::{http_retry::Client, number_or_string, AccountDetails, AccountSettings, NodeStore}; use bytes::Bytes; -use futures::{ - future::{err, join_all, ok, Either}, - Future, Stream, -}; +use futures::{future::join_all, Future, FutureExt, StreamExt, TryFutureExt}; use interledger_btp::{connect_to_service_account, BtpAccount, BtpOutgoingService}; use interledger_ccp::{CcpRoutingAccount, Mode, RouteControlRequest, RoutingRelation}; use interledger_http::{deserialize_json, error::*, HttpAccount, HttpStore}; @@ -23,7 +20,7 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use std::convert::TryFrom; use uuid::Uuid; -use warp::{self, Filter, Rejection}; +use warp::{self, reply::Json, Filter, Rejection}; pub const BEARER_TOKEN_START: usize = 7; @@ -64,140 +61,111 @@ where + 'static, { // TODO can we make any of the Filters const or put them in lazy_static? 
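The rewritten filters below follow the warp 0.2 style: shared state is injected with `warp::any().map(move || store.clone())`, and authorization runs inside `and_then` closures that return `async move` blocks yielding `Result<_, Rejection>`. A minimal sketch of that shape (the header comparison and the generic rejection are placeholders; the real filters reject with their own ApiError variants):

    use warp::Filter;

    /// Passes the request through only if the Authorization header matches `expected`.
    fn admin_only(expected: String) -> warp::filters::BoxedFilter<()> {
        warp::header::<String>("authorization")
            .and_then(move |auth: String| {
                let expected = expected.clone();
                async move {
                    if auth == expected {
                        Ok(())
                    } else {
                        Err(warp::reject::reject())
                    }
                }
            })
            // Drop the unit value so downstream filters don't have to accept it.
            .untuple_one()
            .boxed()
    }
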
+ let with_store = warp::any().map(move || store.clone()).boxed(); + let with_incoming_handler = warp::any().map(move || incoming_handler.clone()).boxed(); // Helper filters let admin_auth_header = format!("Bearer {}", admin_api_token); + let admin_auth_header_clone = admin_auth_header.clone(); + let with_admin_auth_header = warp::any().map(move || admin_auth_header.clone()).boxed(); let admin_only = warp::header::("authorization") - .and_then( - move |authorization: SecretString| -> Result<(), Rejection> { + .and_then(move |authorization: SecretString| { + let admin_auth_header = admin_auth_header_clone.clone(); + async move { if authorization.expose_secret() == &admin_auth_header { - Ok(()) + Ok::<(), Rejection>(()) } else { - Err(ApiError::unauthorized().into()) + Err(Rejection::from(ApiError::unauthorized())) } - }, - ) + } + }) // This call makes it so we do not pass on a () value on // success to the next filter, it just gets rid of it .untuple_one() .boxed(); - let with_store = warp::any().map(move || store.clone()).boxed(); - let admin_auth_header = format!("Bearer {}", admin_api_token); - let with_admin_auth_header = warp::any().map(move || admin_auth_header.clone()).boxed(); - let with_incoming_handler = warp::any().map(move || incoming_handler.clone()).boxed(); - // Note that the following path filters should be applied before others - // (such as method and authentication) to avoid triggering unexpected errors for requests - // that do not match this path. - let accounts = warp::path("accounts"); - let accounts_index = accounts.and(warp::path::end()); - // This is required when using `admin_or_authorized_user_only` or `authorized_user_only` filter. - // Sets Username from path into ext for context. - let account_username = accounts - .and(warp::path::param2::()) - .and_then(|username: Username| -> Result<_, Rejection> { - warp::filters::ext::set(username); - Ok(()) - }) - .untuple_one() - .boxed(); - let account_username_to_id = accounts - .and(warp::path::param2::()) + + // Converts an account username to an account id or errors out + let account_username_to_id = warp::path::param::() .and(with_store.clone()) - .and_then(|username: Username, store: S| { - store - .get_account_id_from_username(&username) - .map_err::<_, Rejection>(move |_| { - // TODO differentiate between server error and not found - error!("Error getting account id from username: {}", username); - ApiError::account_not_found().into() - }) + .and_then(move |username: Username, store: S| { + async move { + store + .get_account_id_from_username(&username) + .map_err(|_| { + // TODO differentiate between server error and not found + error!("Error getting account id from username: {}", username); + Rejection::from(ApiError::account_not_found()) + }) + .await + } }) .boxed(); - // Receives parameters which were prepared by `account_username` and - // considers the request is eligible to be processed or not, checking the auth. - // Why we separate `account_username` and this filter is that - // we want to check whether the sender is eligible to access this path but at the same time, - // we don't want to spawn any `Rejection`s at `account_username`. - // At the point of `account_username`, there might be some other - // remaining path filters. So we have to process those first, not to spawn errors of - // unauthorized that the the request actually should not cause. - // This function needs parameters which can be prepared by `account_username`. 
- let admin_or_authorized_user_only = warp::filters::ext::get::() + let is_authorized_user = move |store: S, path_username: Username, auth_string: SecretString| { + async move { + if auth_string.expose_secret().len() < BEARER_TOKEN_START { + return Err(Rejection::from(ApiError::bad_request())); + } + + // Try getting the account from the store + let authorized_account = store + .get_account_from_http_auth( + &path_username, + &auth_string.expose_secret()[BEARER_TOKEN_START..], + ) + .map_err(|_| Rejection::from(ApiError::unauthorized())) + .await?; + + // Only return the account if the provided username matched the fetched one + // This maybe is redundant? + if &path_username == authorized_account.username() { + Ok(authorized_account) + } else { + Err(ApiError::unauthorized().into()) + } + } + }; + + // Checks if the account is an admin or if they have provided a valid password + let admin_or_authorized_user_only = warp::path::param::() .and(warp::header::("authorization")) .and(with_store.clone()) - .and(with_admin_auth_header.clone()) + .and(with_admin_auth_header) .and_then( - |path_username: Username, - auth_string: SecretString, - store: S, - admin_auth_header: String| { - if auth_string.expose_secret().len() < BEARER_TOKEN_START { - return Either::A(err(ApiError::bad_request().into())); + move |path_username: Username, + auth_string: SecretString, + store: S, + admin_auth_header: String| { + async move { + // If it's an admin, there's no need for more checks + if auth_string.expose_secret() == &admin_auth_header { + let account_id = store + .get_account_id_from_username(&path_username) + .map_err(|_| { + // TODO differentiate between server error and not found + error!("Error getting account id from username: {}", path_username); + Rejection::from(ApiError::account_not_found()) + }) + .await?; + return Ok(account_id); + } + let account = is_authorized_user(store, path_username, auth_string).await?; + Ok::(account.id()) } - Either::B(store.get_account_id_from_username(&path_username).then( - move |account_id: Result| { - if account_id.is_err() { - return Either::A(err::( - ApiError::account_not_found().into(), - )); - } - let account_id = account_id.unwrap(); - if auth_string.expose_secret() == &admin_auth_header { - return Either::A(ok(account_id)); - } - Either::B( - store - .get_account_from_http_auth( - &path_username, - &auth_string.expose_secret()[BEARER_TOKEN_START..], - ) - .then(move |authorized_account: Result| { - if authorized_account.is_err() { - return err(ApiError::unauthorized().into()); - } - let authorized_account = authorized_account.unwrap(); - if &path_username == authorized_account.username() { - ok(authorized_account.id()) - } else { - err(ApiError::unauthorized().into()) - } - }), - ) - }, - )) }, ) .boxed(); - // The same structure as `admin_or_authorized_user_only`. - // This function needs parameters which can be prepared by `account_username`. - let authorized_user_only = warp::filters::ext::get::() + // Checks if the account has provided a valid password (same as admin-or-auth call, minus one call, can we refactor them together?) 
+ let authorized_user_only = warp::path::param::() .and(warp::header::("authorization")) .and(with_store.clone()) .and_then( - |path_username: Username, auth_string: SecretString, store: S| { - if auth_string.expose_secret().len() < BEARER_TOKEN_START { - return Either::A(err(ApiError::bad_request().into())); + move |path_username: Username, auth_string: SecretString, store: S| { + async move { + let account = is_authorized_user(store, path_username, auth_string).await?; + Ok::(account) } - Either::B( - store - .get_account_from_http_auth( - &path_username, - &auth_string.expose_secret()[BEARER_TOKEN_START..], - ) - .then(move |authorized_account: Result| { - if authorized_account.is_err() { - return err::(ApiError::unauthorized().into()); - } - let authorized_account = authorized_account.unwrap(); - if &path_username == authorized_account.username() { - ok(authorized_account) - } else { - err(ApiError::unauthorized().into()) - } - }), - ) }, ) .boxed(); @@ -205,261 +173,283 @@ where // POST /accounts let btp_clone = btp.clone(); let outgoing_handler_clone = outgoing_handler.clone(); - let post_accounts = warp::post2() - .and(accounts_index) + let post_accounts = warp::post() + .and(warp::path("accounts")) + .and(warp::path::end()) .and(admin_only.clone()) - .and(deserialize_json()) + .and(deserialize_json()) // Why does warp::body::json not work? .and(with_store.clone()) .and_then(move |account_details: AccountDetails, store: S| { let store_clone = store.clone(); let handler = outgoing_handler_clone.clone(); let btp = btp_clone.clone(); - store - .insert_account(account_details.clone()) - .map_err(move |_| { - error!("Error inserting account into store: {:?}", account_details); - // TODO need more information - ApiError::internal_server_error().into() - }) - .and_then(move |account| { - connect_to_external_services(handler, account, store_clone, btp) - }) - .and_then(|account: A| Ok(warp::reply::json(&account))) + async move { + let account = store + .insert_account(account_details.clone()) + .map_err(move |_| { + error!("Error inserting account into store: {:?}", account_details); + // TODO need more information + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + + connect_to_external_services(handler, account.clone(), store_clone, btp).await?; + Ok::(warp::reply::json(&account)) + } }) .boxed(); // GET /accounts - let get_accounts = warp::get2() - .and(accounts_index) + let get_accounts = warp::get() + .and(warp::path("accounts")) + .and(warp::path::end()) .and(admin_only.clone()) .and(with_store.clone()) .and_then(|store: S| { - store - .get_all_accounts() - .map_err::<_, Rejection>(|_| ApiError::internal_server_error().into()) - .and_then(|accounts| Ok(warp::reply::json(&accounts))) + async move { + let accounts = store + .get_all_accounts() + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + Ok::(warp::reply::json(&accounts)) + } }) .boxed(); // PUT /accounts/:username - let put_account = warp::put2() + let put_account = warp::put() + .and(warp::path("accounts")) .and(account_username_to_id.clone()) .and(warp::path::end()) .and(admin_only.clone()) - .and(deserialize_json()) + .and(deserialize_json()) // warp::body::json() is not able to decode this! 
.and(with_store.clone()) .and_then(move |id: Uuid, account_details: AccountDetails, store: S| { - let store_clone = store.clone(); - let handler = outgoing_handler.clone(); + let outgoing_handler = outgoing_handler.clone(); let btp = btp.clone(); - store - .update_account(id, account_details) - .map_err::<_, Rejection>(move |_| ApiError::internal_server_error().into()) - .and_then(move |account| { - connect_to_external_services(handler, account, store_clone, btp) - }) - .and_then(|account: A| Ok(warp::reply::json(&account))) + async move { + let account = store + .update_account(id, account_details) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + connect_to_external_services(outgoing_handler, account.clone(), store, btp).await?; + + Ok::(warp::reply::json(&account)) + } }) .boxed(); // GET /accounts/:username - let get_account = warp::get2() - .and(account_username.clone()) - .and(warp::path::end()) + let get_account = warp::get() + .and(warp::path("accounts")) + // takes the username and the authorization header and checks if it's authorized, returns the uid .and(admin_or_authorized_user_only.clone()) + .and(warp::path::end()) .and(with_store.clone()) .and_then(|id: Uuid, store: S| { - store - .get_accounts(vec![id]) - .map_err::<_, Rejection>(|_| ApiError::account_not_found().into()) - .and_then(|accounts| Ok(warp::reply::json(&accounts[0]))) + async move { + let accounts = store + .get_accounts(vec![id]) + .map_err(|_| Rejection::from(ApiError::account_not_found())) + .await?; + + Ok::(warp::reply::json(&accounts[0])) + } }) .boxed(); // GET /accounts/:username/balance - let get_account_balance = warp::get2() - .and(account_username.clone()) + let get_account_balance = warp::get() + .and(warp::path("accounts")) + // takes the username and the authorization header and checks if it's authorized, returns the uid + .and(admin_or_authorized_user_only.clone()) .and(warp::path("balance")) .and(warp::path::end()) - .and(admin_or_authorized_user_only.clone()) .and(with_store.clone()) .and_then(|id: Uuid, store: S| { - // TODO reduce the number of store calls it takes to get the balance - store - .get_accounts(vec![id]) - .map_err(|_| warp::reject::not_found()) - .and_then(move |mut accounts| { - let account = accounts.pop().unwrap(); - let acc_clone = account.clone(); - let asset_scale = acc_clone.asset_scale(); - let asset_code = acc_clone.asset_code().to_owned(); - store - .get_balance(account) - .map_err(move |_| { - error!("Error getting balance for account: {}", id); - ApiError::internal_server_error().into() - }) - .and_then(move |balance: i64| { - Ok(warp::reply::json(&json!({ - // normalize to the base unit - "balance": balance as f64 / 10_u64.pow(asset_scale.into()) as f64, - "asset_code": asset_code, - }))) - }) - }) + async move { + // TODO reduce the number of store calls it takes to get the balance + let mut accounts = store + .get_accounts(vec![id]) + .map_err(|_| warp::reject::not_found()) + .await?; + let account = accounts.pop().unwrap(); + + let balance = store + .get_balance(account.clone()) + .map_err(move |_| { + error!("Error getting balance for account: {}", id); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + + let asset_scale = account.asset_scale(); + let asset_code = account.asset_code().to_owned(); + Ok::(warp::reply::json(&json!({ + // normalize to the base unit + "balance": balance as f64 / 10_u64.pow(asset_scale.into()) as f64, + "asset_code": asset_code, + }))) + } }) .boxed(); // DELETE /accounts/:username - 
let delete_account = warp::delete2() + let delete_account = warp::delete() + .and(warp::path("accounts")) .and(account_username_to_id.clone()) .and(warp::path::end()) - .and(admin_only.clone()) + .and(admin_only) .and(with_store.clone()) .and_then(|id: Uuid, store: S| { - store - .delete_account(id) - .map_err::<_, Rejection>(move |_| { - error!("Error deleting account {}", id); - ApiError::internal_server_error().into() - }) - .and_then(|account| Ok(warp::reply::json(&account))) + async move { + let account = store + .delete_account(id) + .map_err(|_| { + error!("Error deleting account {}", id); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + Ok::(warp::reply::json(&account)) + } }) .boxed(); // PUT /accounts/:username/settings - let put_account_settings = warp::put2() - .and(account_username.clone()) + let put_account_settings = warp::put() + .and(warp::path("accounts")) + .and(admin_or_authorized_user_only.clone()) .and(warp::path("settings")) .and(warp::path::end()) - .and(admin_or_authorized_user_only.clone()) .and(deserialize_json()) .and(with_store.clone()) .and_then(|id: Uuid, settings: AccountSettings, store: S| { - store - .modify_account_settings(id, settings) - .map_err::<_, Rejection>(move |_| { - error!("Error updating account settings {}", id); - ApiError::internal_server_error().into() - }) - .and_then(|settings| Ok(warp::reply::json(&settings))) + async move { + let modified_account = store + .modify_account_settings(id, settings) + .map_err(move |_| { + error!("Error updating account settings {}", id); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + Ok::(warp::reply::json(&modified_account)) + } }) .boxed(); // (Websocket) /accounts/:username/payments/incoming - let incoming_payment_notifications = account_username - .clone() + let incoming_payment_notifications = warp::path("accounts") + .and(admin_or_authorized_user_only) .and(warp::path("payments")) .and(warp::path("incoming")) .and(warp::path::end()) - .and(admin_or_authorized_user_only.clone()) - .and(warp::ws2()) + .and(warp::ws()) .and(with_store.clone()) - .map(|id: Uuid, ws: warp::ws::Ws2, store: S| { + .map(|id: Uuid, ws: warp::ws::Ws, store: S| { ws.on_upgrade(move |ws: warp::ws::WebSocket| { - let (tx, rx) = futures::sync::mpsc::unbounded::(); - store.add_payment_notification_subscription(id, tx); - rx.map_err(|_| -> warp::Error { unreachable!("unbounded rx never errors") }) - .map(|notification| { - warp::ws::Message::text(serde_json::to_string(¬ification).unwrap()) - }) - .forward(ws) - .map(|_| ()) - .map_err(|err| error!("Error forwarding notifications to websocket: {:?}", err)) + notify_user(ws, id, store).map(|result| result.unwrap()) }) }) .boxed(); // POST /accounts/:username/payments - let post_payments = warp::post2() - .and(account_username.clone()) + let post_payments = warp::post() + .and(warp::path("accounts")) + .and(authorized_user_only) .and(warp::path("payments")) .and(warp::path::end()) - .and(authorized_user_only.clone()) .and(deserialize_json()) - .and(with_incoming_handler.clone()) + .and(with_incoming_handler) .and_then( move |account: A, pay_request: SpspPayRequest, incoming_handler: I| { - pay( - incoming_handler, - account.clone(), - &pay_request.receiver, - pay_request.source_amount, - ) - .and_then(move |receipt| { + async move { + let receipt = pay( + incoming_handler, + account.clone(), + &pay_request.receiver, + pay_request.source_amount, + ) + .map_err(|err| { + error!("Error sending SPSP payment: {:?}", err); + // TODO give a 
different error message depending on what type of error it is + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + debug!("Sent SPSP payment, receipt: {:?}", receipt); - Ok(warp::reply::json(&json!(receipt))) - }) - .map_err::<_, Rejection>(|err| { - error!("Error sending SPSP payment: {:?}", err); - // TODO give a different error message depending on what type of error it is - ApiError::internal_server_error().into() - }) + Ok::(warp::reply::json(&json!(receipt))) + } }, ) .boxed(); // GET /accounts/:username/spsp let server_secret_clone = server_secret.clone(); - let get_spsp = warp::get2() - .and(account_username_to_id.clone()) + let get_spsp = warp::get() + .and(warp::path("accounts")) + .and(account_username_to_id) .and(warp::path("spsp")) .and(warp::path::end()) .and(with_store.clone()) .and_then(move |id: Uuid, store: S| { let server_secret_clone = server_secret_clone.clone(); - store - .get_accounts(vec![id]) - .map_err::<_, Rejection>(|_| ApiError::internal_server_error().into()) - .and_then(move |accounts| { - // TODO return the response without instantiating an SpspResponder (use a simple fn) - Ok(SpspResponder::new( + async move { + let accounts = store + .get_accounts(vec![id]) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + // TODO return the response without instantiating an SpspResponder (use a simple fn) + Ok::<_, Rejection>( + SpspResponder::new( accounts[0].ilp_address().clone(), server_secret_clone.clone(), ) - .generate_http_response()) - }) + .generate_http_response(), + ) + } }) .boxed(); // GET /.well-known/pay // This is the endpoint a [Payment Pointer](https://github.com/interledger/rfcs/blob/master/0026-payment-pointers/0026-payment-pointers.md) // with no path resolves to - let server_secret_clone = server_secret.clone(); - let get_spsp_well_known = warp::get2() + let get_spsp_well_known = warp::get() .and(warp::path(".well-known")) .and(warp::path("pay")) .and(warp::path::end()) - .and(with_store.clone()) + .and(with_store) .and_then(move |store: S| { - // TODO don't clone this - if let Some(username) = default_spsp_account.clone() { - let server_secret_clone = server_secret_clone.clone(); - Either::A( - store + let default_spsp_account = default_spsp_account.clone(); + let server_secret_clone = server_secret.clone(); + async move { + // TODO don't clone this + if let Some(username) = default_spsp_account.clone() { + let id = store .get_account_id_from_username(&username) - .map_err(move |_| { + .map_err(|_| { error!("Account not found: {}", username); warp::reject::not_found() }) - .and_then(move |id| { - // TODO this shouldn't take multiple store calls - store - .get_accounts(vec![id]) - .map_err(|_| ApiError::internal_server_error().into()) - .map(|mut accounts| accounts.pop().unwrap()) - }) - .and_then(move |account| { - // TODO return the response without instantiating an SpspResponder (use a simple fn) - Ok(SpspResponder::new( - account.ilp_address().clone(), - server_secret_clone.clone(), - ) - .generate_http_response()) - }), - ) - } else { - Either::B(err(ApiError::not_found().into())) + .await?; + + // TODO this shouldn't take multiple store calls + let mut accounts = store + .get_accounts(vec![id]) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + + let account = accounts.pop().unwrap(); + // TODO return the response without instantiating an SpspResponder (use a simple fn) + Ok::<_, Rejection>( + SpspResponder::new( + account.ilp_address().clone(), + 
server_secret_clone.clone(), + ) + .generate_http_response(), + ) + } else { + Err(Rejection::from(ApiError::not_found())) + } } }) .boxed(); @@ -469,20 +459,46 @@ where .or(post_accounts) .or(get_accounts) .or(put_account) + .or(delete_account) .or(get_account) .or(get_account_balance) - .or(delete_account) .or(put_account_settings) .or(incoming_payment_notifications) .or(post_payments) .boxed() } -fn get_address_from_parent_and_update_routes( +fn notify_user( + socket: warp::ws::WebSocket, + id: Uuid, + store: impl StreamNotificationsStore, +) -> impl Future> { + let (tx, rx) = futures::channel::mpsc::unbounded::(); + // the client is now subscribed + store.add_payment_notification_subscription(id, tx); + + // Anytime something is written to tx, it will reach rx + // and get converted to a warp::ws::Message + let rx = rx.map(|notification: PaymentNotification| { + let msg = warp::ws::Message::text(serde_json::to_string(¬ification).unwrap()); + Ok(msg) + }); + + // Then it gets forwarded to the client + rx.forward(socket) + .map(|result| { + if let Err(e) = result { + eprintln!("websocket send error: {}", e); + } + }) + .then(futures::future::ok) +} + +async fn get_address_from_parent_and_update_routes( mut service: O, parent: A, store: S, -) -> impl Future +) -> Result<(), ()> where O: OutgoingService + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Clone + Send + Sync + 'static, @@ -494,7 +510,7 @@ where parent.id() ); let prepare = IldcpRequest {}.to_prepare(); - service + let fulfill = service .send_request(OutgoingRequest { from: parent.clone(), // Does not matter what we put here, they will get the account from the HTTP/BTP credentials to: parent.clone(), @@ -502,54 +518,53 @@ where original_amount: 0, }) .map_err(|err| error!("Error getting ILDCP info: {:?}", err)) - .and_then(|fulfill| { - let response = IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { - error!( - "Unable to parse ILDCP response from fulfill packet: {:?}", - err - ); - }); - debug!("Got ILDCP response from parent: {:?}", response); - let ilp_address = match response { - Ok(info) => info.ilp_address(), - Err(_) => return err(()), - }; - ok(ilp_address) - }) - .and_then(move |ilp_address| { - debug!("ILP address is now: {}", ilp_address); - // TODO we may want to make this trigger the CcpRouteManager to request - let prepare = RouteControlRequest { - mode: Mode::Sync, - last_known_epoch: 0, - last_known_routing_table_id: [0; 16], - features: Vec::new(), - } - .to_prepare(); - debug!("Asking for routes from {:?}", parent.clone()); - join_all(vec![ - // Set the parent to be the default route for everything - // that starts with their global prefix - store.set_default_route(parent.id()), - // Update our store's address - store.set_ilp_address(ilp_address), - // Get the parent's routes for us - Box::new( - service - .send_request(OutgoingRequest { - from: parent.clone(), - to: parent.clone(), - original_amount: prepare.amount(), - prepare: prepare.clone(), - }) - .and_then(move |_| Ok(())) - .map_err(move |err| { - error!("Got error when trying to update routes {:?}", err) - }), - ), - ]) - }) - .and_then(move |_| Ok(())) + .await?; + + let info = IldcpResponse::try_from(fulfill.into_data().freeze()).map_err(|err| { + error!( + "Unable to parse ILDCP response from fulfill packet: {:?}", + err + ); + })?; + debug!("Got ILDCP response from parent: {:?}", info); + let ilp_address = info.ilp_address(); + + debug!("ILP address is now: {}", ilp_address); + // TODO we may want to make this trigger 
the CcpRouteManager to request + let prepare = RouteControlRequest { + mode: Mode::Sync, + last_known_epoch: 0, + last_known_routing_table_id: [0; 16], + features: Vec::new(), + } + .to_prepare(); + + debug!("Asking for routes from {:?}", parent.clone()); + let ret = join_all(vec![ + // Set the parent to be the default route for everything + // that starts with their global prefix + store.set_default_route(parent.id()), + // Update our store's address + store.set_ilp_address(ilp_address), + // Get the parent's routes for us + Box::pin( + service + .send_request(OutgoingRequest { + from: parent.clone(), + to: parent.clone(), + original_amount: prepare.amount(), + prepare: prepare.clone(), + }) + .map_err(|_| ()) + .map_ok(|_| ()), + ), + ]) + .await; + // If any of the 3 futures errored, propagate the error outside + if ret.into_iter().any(|r| r.is_err()) { + return Err(()); + } + Ok(()) } // Helper function which gets called whenever a new account is added or @@ -562,12 +577,12 @@ where // 2b. Perform a RouteControl Request to make them send us any new routes // 3. If they have a settlement engine endpoitn configured: Make a POST to the // engine's account creation endpoint with the account's id -fn connect_to_external_services( +async fn connect_to_external_services( service: O, account: A, store: S, btp: BtpOutgoingService, -) -> impl Future +) -> Result where O: OutgoingService + Clone + Send + Sync + 'static, A: CcpRoutingAccount + BtpAccount + SettlementAccount + Clone + Send + Sync + 'static, @@ -576,158 +591,136 @@ where { // Try to connect to the account's BTP socket if they have // one configured - let btp_connect_fut = if account.get_ilp_over_btp_url().is_some() { + if account.get_ilp_over_btp_url().is_some() { trace!("Newly inserted account has a BTP URL configured, will try to connect"); - Either::A( - connect_to_service_account(account.clone(), true, btp) - .map_err(|_| ApiError::internal_server_error().into()), - ) - } else { - Either::B(ok(())) - }; + connect_to_service_account(account.clone(), true, btp) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await? 
+ } + + // If we added a parent, get the address assigned to us by + // them and update all of our routes + if account.routing_relation() == RoutingRelation::Parent { + get_address_from_parent_and_update_routes(service, account.clone(), store.clone()) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + } + + // Register the account with the settlement engine + // if a settlement_engine_url was configured on the account + // or if there is a settlement engine configured for that + // account's asset_code + let default_settlement_engine = store + .get_asset_settlement_engine(account.asset_code()) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + + let settlement_engine_url = account + .settlement_engine_details() + .map(|details| details.url) + .or(default_settlement_engine); + if let Some(se_url) = settlement_engine_url { + let id = account.id(); + let http_client = Client::default(); + trace!( + "Sending account {} creation request to settlement engine: {:?}", + id, + se_url.clone() + ); - btp_connect_fut.and_then(move |_| { - // If we added a parent, get the address assigned to us by - // them and update all of our routes - let get_ilp_address_fut = if account.routing_relation() == RoutingRelation::Parent { - Either::A( - get_address_from_parent_and_update_routes(service, account.clone(), store.clone()) - .map_err(|_| ApiError::internal_server_error().into()) - ) + let status_code = http_client + .create_engine_account(se_url, id) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + + if status_code.is_success() { + trace!("Account {} created on the SE", id); } else { - Either::B(ok(())) - }; - - let default_settlement_engine_fut = store.get_asset_settlement_engine(account.asset_code()) - .map_err(|_| ApiError::internal_server_error().into()); - - // Register the account with the settlement engine - // if a settlement_engine_url was configured on the account - // or if there is a settlement engine configured for that - // account's asset_code - default_settlement_engine_fut.join(get_ilp_address_fut).and_then(move |(default_settlement_engine, _)| { - let settlement_engine_url = account.settlement_engine_details().map(|details| details.url).or(default_settlement_engine); - if let Some(se_url) = settlement_engine_url { - let id = account.id(); - let http_client = Client::default(); - trace!( - "Sending account {} creation request to settlement engine: {:?}", - id, - se_url.clone() - ); - Either::A( - http_client.create_engine_account(se_url, id) - .map_err(|_| ApiError::internal_server_error().into()) - .and_then(move |status_code| { - if status_code.is_success() { - trace!("Account {} created on the SE", id); - } else { - error!("Error creating account. Settlement engine responded with HTTP code: {}", status_code); - } - Ok(()) - }) - .and_then(move |_| { - Ok(account) - })) - } else { - Either::B(ok(account)) - } - }) - }) + error!( + "Error creating account. 
Settlement engine responded with HTTP code: {}", + status_code + ); + } + } + + Ok(account) } #[cfg(test)] mod tests { use crate::routes::test_helpers::*; + // TODO: Add test for GET /accounts/:username/spsp and /.well_known - #[test] - fn only_admin_can_create_account() { + #[tokio::test] + async fn only_admin_can_create_account() { let api = test_accounts_api(); - let resp = api_call(&api, "POST", "/accounts", "admin", DETAILS.clone()); + let resp = api_call(&api, "POST", "/accounts", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "POST", "/accounts", "wrong", DETAILS.clone()); + let resp = api_call(&api, "POST", "/accounts", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_delete_account() { + #[tokio::test] + async fn only_admin_can_delete_account() { let api = test_accounts_api(); - let resp = api_call(&api, "DELETE", "/accounts/alice", "admin", DETAILS.clone()); + let resp = api_call(&api, "DELETE", "/accounts/alice", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "DELETE", "/accounts/alice", "wrong", DETAILS.clone()); + let resp = api_call(&api, "DELETE", "/accounts/alice", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_modify_whole_account() { + #[tokio::test] + async fn only_admin_can_modify_whole_account() { let api = test_accounts_api(); - let resp = api_call(&api, "PUT", "/accounts/alice", "admin", DETAILS.clone()); + let resp = api_call(&api, "PUT", "/accounts/alice", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/accounts/alice", "wrong", DETAILS.clone()); + let resp = api_call(&api, "PUT", "/accounts/alice", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_get_all_accounts() { + #[tokio::test] + async fn only_admin_can_get_all_accounts() { let api = test_accounts_api(); - let resp = api_call(&api, "GET", "/accounts", "admin", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts", "admin", None).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "GET", "/accounts", "wrong", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts", "wrong", None).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_or_user_can_get_account() { + #[tokio::test] + async fn only_admin_or_user_can_get_account() { let api = test_accounts_api(); - let resp = api_call(&api, "GET", "/accounts/alice", "admin", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts/alice", "admin", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); // TODO: Make this not require the username in the token - let resp = api_call(&api, "GET", "/accounts/alice", "password", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts/alice", "password", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "GET", "/accounts/alice", "wrong", DETAILS.clone()); + let resp = api_call(&api, "GET", "/accounts/alice", "wrong", DETAILS.clone()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_or_user_can_get_accounts_balance() { + #[tokio::test] + async fn only_admin_or_user_can_get_accounts_balance() { let api = test_accounts_api(); - let resp = api_call( - &api, - "GET", - "/accounts/alice/balance", - "admin", - DETAILS.clone(), - ); + 
let resp = api_call(&api, "GET", "/accounts/alice/balance", "admin", None).await; assert_eq!(resp.status().as_u16(), 200); // TODO: Make this not require the username in the token - let resp = api_call( - &api, - "GET", - "/accounts/alice/balance", - "password", - DETAILS.clone(), - ); + let resp = api_call(&api, "GET", "/accounts/alice/balance", "password", None).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call( - &api, - "GET", - "/accounts/alice/balance", - "wrong", - DETAILS.clone(), - ); + let resp = api_call(&api, "GET", "/accounts/alice/balance", "wrong", None).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_or_user_can_modify_accounts_settings() { + #[tokio::test] + async fn only_admin_or_user_can_modify_accounts_settings() { let api = test_accounts_api(); let resp = api_call( &api, @@ -735,7 +728,8 @@ mod tests { "/accounts/alice/settings", "admin", DETAILS.clone(), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 200); // TODO: Make this not require the username in the token @@ -745,7 +739,8 @@ mod tests { "/accounts/alice/settings", "password", DETAILS.clone(), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 200); let resp = api_call( @@ -754,7 +749,50 @@ mod tests { "/accounts/alice/settings", "wrong", DETAILS.clone(), - ); + ) + .await; + assert_eq!(resp.status().as_u16(), 401); + } + + #[tokio::test] + async fn only_admin_or_user_can_send_payment() { + let payment: Option = Some(serde_json::json!({ + "receiver": "some_receiver", + "source_amount" : 10, + })); + let api = test_accounts_api(); + let resp = api_call( + &api, + "POST", + "/accounts/alice/payments", + "password", + payment.clone(), + ) + .await; + // This should return an internal server error since we're making an invalid payment request + // We could have set up a mockito mock to set that pay is called correctly but we merely want + // to check that authorization and paths work as expected + assert_eq!(resp.status().as_u16(), 500); + + // Note that the operator has indirect access to the user's token since they control the store + let resp = api_call( + &api, + "POST", + "/accounts/alice/payments", + "admin", + payment.clone(), + ) + .await; + assert_eq!(resp.status().as_u16(), 401); + + let resp = api_call( + &api, + "POST", + "/accounts/alice/payments", + "wrong", + payment.clone(), + ) + .await; assert_eq!(resp.status().as_u16(), 401); } } diff --git a/crates/interledger-api/src/routes/node_settings.rs b/crates/interledger-api/src/routes/node_settings.rs index 40f12819b..c08fcf3ba 100644 --- a/crates/interledger-api/src/routes/node_settings.rs +++ b/crates/interledger-api/src/routes/node_settings.rs @@ -1,9 +1,6 @@ use crate::{http_retry::Client, ExchangeRates, NodeStore}; -use bytes::Buf; -use futures::{ - future::{err, join_all, Either}, - Future, -}; +use bytes::Bytes; +use futures::TryFutureExt; use interledger_http::{deserialize_json, error::*, HttpAccount, HttpStore}; use interledger_packet::Address; use interledger_router::RouterStore; @@ -19,7 +16,8 @@ use std::{ str::{self, FromStr}, }; use url::Url; -use warp::{self, Filter, Rejection}; +use uuid::Uuid; +use warp::{self, reply::Json, Filter, Rejection}; // TODO add more to this response #[derive(Clone, Serialize)] @@ -41,20 +39,21 @@ where + BalanceStore + ExchangeRateStore + RouterStore, - A: Account + HttpAccount + SettlementAccount + Serialize + 'static, + A: Account + HttpAccount + Send + Sync + SettlementAccount + Serialize + 'static, { // Helper filters let admin_auth_header = 
format!("Bearer {}", admin_api_token); let admin_only = warp::header::("authorization") - .and_then( - move |authorization: SecretString| -> Result<(), Rejection> { + .and_then(move |authorization: SecretString| { + let admin_auth_header = admin_auth_header.clone(); + async move { if authorization.expose_secret() == &admin_auth_header { - Ok(()) + Ok::<(), Rejection>(()) } else { - Err(ApiError::unauthorized().into()) + Err(Rejection::from(ApiError::unauthorized())) } - }, - ) + } + }) // This call makes it so we do not pass on a () value on // success to the next filter, it just gets rid of it .untuple_one() @@ -62,7 +61,7 @@ where let with_store = warp::any().map(move || store.clone()).boxed(); // GET / - let get_root = warp::get2() + let get_root = warp::get() .and(warp::path::end()) .and(with_store.clone()) .map(move |store: S| { @@ -75,197 +74,200 @@ where .boxed(); // PUT /rates - let put_rates = warp::put2() + let put_rates = warp::put() .and(warp::path("rates")) .and(warp::path::end()) .and(admin_only.clone()) .and(deserialize_json()) .and(with_store.clone()) - .and_then(|rates: ExchangeRates, store: S| -> Result<_, Rejection> { - if store.set_exchange_rates(rates.0.clone()).is_ok() { - Ok(warp::reply::json(&rates)) - } else { - error!("Error setting exchange rates"); - Err(ApiError::internal_server_error().into()) + .and_then(|rates: ExchangeRates, store: S| { + async move { + if store.set_exchange_rates(rates.0.clone()).is_ok() { + Ok(warp::reply::json(&rates)) + } else { + error!("Error setting exchange rates"); + Err(Rejection::from(ApiError::internal_server_error())) + } } }) .boxed(); // GET /rates - let get_rates = warp::get2() + let get_rates = warp::get() .and(warp::path("rates")) .and(warp::path::end()) .and(with_store.clone()) - .and_then(|store: S| -> Result<_, Rejection> { - if let Ok(rates) = store.get_all_exchange_rates() { - Ok(warp::reply::json(&rates)) - } else { - error!("Error getting exchange rates"); - Err(ApiError::internal_server_error().into()) + .and_then(|store: S| { + async move { + if let Ok(rates) = store.get_all_exchange_rates() { + Ok::(warp::reply::json(&rates)) + } else { + error!("Error getting exchange rates"); + Err(Rejection::from(ApiError::internal_server_error())) + } } }) .boxed(); // GET /routes // Response: Map of ILP Address prefix -> Username - let get_routes = warp::get2() + let get_routes = warp::get() .and(warp::path("routes")) .and(warp::path::end()) .and(with_store.clone()) .and_then(|store: S| { - // Convert the account IDs listed in the routing table - // to the usernames for the API response - let routes = store.routing_table().clone(); - store - .get_accounts(routes.values().cloned().collect()) - .map_err::<_, Rejection>(|_| { - error!("Error getting accounts from store"); - ApiError::internal_server_error().into() - }) - .and_then(move |accounts| { - let routes: HashMap = HashMap::from_iter( - routes - .iter() - .map(|(prefix, _)| prefix.to_string()) - .zip(accounts.into_iter().map(|a| a.username().to_string())), - ); + async move { + // Convert the account IDs listed in the routing table + // to the usernames for the API response + let routes = store.routing_table().clone(); + let accounts = store + .get_accounts(routes.values().cloned().collect()) + .map_err(|_| { + error!("Error getting accounts from store"); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + let routes: HashMap = HashMap::from_iter( + routes + .iter() + .map(|(prefix, _)| prefix.to_string()) + .zip(accounts.into_iter().map(|a| 
a.username().to_string())), + ); - Ok(warp::reply::json(&routes)) - }) + Ok::(warp::reply::json(&routes)) + } }) .boxed(); // PUT /routes/static // Body: Map of ILP Address prefix -> Username - let put_static_routes = warp::put2() + let put_static_routes = warp::put() .and(warp::path("routes")) .and(warp::path("static")) .and(warp::path::end()) .and(admin_only.clone()) .and(deserialize_json()) .and(with_store.clone()) - .and_then(|routes: HashMap, store: S| { - // Convert the usernames to account IDs to set the routes in the store - let store_clone = store.clone(); - let usernames: Vec = routes.values().cloned().collect(); - // TODO use one store call to look up all of the usernames - join_all(usernames.into_iter().map(move |username| { - store_clone - .get_account_id_from_username(&username) - .map_err(move |_| { - error!("No account exists with username: {}", username); - ApiError::account_not_found().into() - }) - })) - .and_then(move |account_ids| { + .and_then(move |routes: HashMap, store: S| { + async move { + // Convert the usernames to account IDs to set the routes in the store + let mut usernames: Vec = Vec::new(); + for username in routes.values() { + let user = match Username::from_str(&username) { + Ok(u) => u, + Err(_) => return Err(Rejection::from(ApiError::bad_request())), + }; + usernames.push(user); + } + + let mut account_ids: Vec = Vec::new(); + for username in usernames { + account_ids.push( + store + .get_account_id_from_username(&username) + .map_err(|_| { + error!("Error setting static routes"); + Rejection::from(ApiError::internal_server_error()) + }) + .await?, + ); + } + let prefixes = routes.keys().map(|s| s.to_string()); store .set_static_routes(prefixes.zip(account_ids.into_iter())) - .map_err::<_, Rejection>(|_| { + .map_err(|_| { error!("Error setting static routes"); - ApiError::internal_server_error().into() + Rejection::from(ApiError::internal_server_error()) }) - .map(move |_| warp::reply::json(&routes)) - }) + .await?; + Ok::(warp::reply::json(&routes)) + } }) .boxed(); // PUT /routes/static/:prefix // Body: Username - let put_static_route = warp::put2() + let put_static_route = warp::put() .and(warp::path("routes")) .and(warp::path("static")) - .and(warp::path::param2::()) + .and(warp::path::param::()) .and(warp::path::end()) .and(admin_only.clone()) - .and(warp::body::concat()) + .and(warp::body::bytes()) .and(with_store.clone()) - .and_then(|prefix: String, body: warp::body::FullBody, store: S| { - if let Ok(username) = str::from_utf8(body.bytes()) - .map_err(|_| ()) - .and_then(|string| Username::from_str(string).map_err(|_| ())) - { + .and_then(|prefix: String, body: Bytes, store: S| { + async move { + let username_str = + str::from_utf8(&body).map_err(|_| Rejection::from(ApiError::bad_request()))?; + let username = Username::from_str(username_str) + .map_err(|_| Rejection::from(ApiError::bad_request()))?; // Convert the username to an account ID to set it in the store - let username_clone = username.clone(); - Either::A( - store - .clone() - .get_account_id_from_username(&username) - .map_err(move |_| { - error!("No account exists with username: {}", username_clone); - ApiError::account_not_found().into() - }) - .and_then(move |account_id| { - store - .set_static_route(prefix, account_id) - .map_err::<_, Rejection>(|_| { - error!("Error setting static route"); - ApiError::internal_server_error().into() - }) - }) - .map(move |_| username.to_string()), - ) - } else { - Either::B(err(ApiError::bad_request().into())) + let account_id = store + 
.get_account_id_from_username(&username) + .map_err(|_| { + error!("No account exists with username: {}", username); + Rejection::from(ApiError::account_not_found()) + }) + .await?; + store + .set_static_route(prefix, account_id) + .map_err(|_| { + error!("Error setting static route"); + Rejection::from(ApiError::internal_server_error()) + }) + .await?; + Ok::(username.to_string()) } }) .boxed(); // PUT /settlement/engines - let put_settlement_engines = warp::put2() + let put_settlement_engines = warp::put() .and(warp::path("settlement")) .and(warp::path("engines")) .and(warp::path::end()) - .and(admin_only.clone()) - .and(deserialize_json()) - .and(with_store.clone()) - .and_then(|asset_to_url_map: HashMap, store: S| { + .and(admin_only) + .and(warp::body::json()) + .and(with_store) + .and_then(move |asset_to_url_map: HashMap, store: S| async move { let asset_to_url_map_clone = asset_to_url_map.clone(); store .set_settlement_engines(asset_to_url_map.clone()) - .map_err::<_, Rejection>(|_| { + .map_err(|_| { error!("Error setting settlement engines"); - ApiError::internal_server_error().into() - }) - .and_then(move |_| { - // Create the accounts on the settlement engines for any - // accounts that are using the default settlement engine URLs - // (This is done in case we modify the globally configured settlement - // engine URLs after accounts have already been added) + Rejection::from(ApiError::internal_server_error()) + }).await?; + // Create the accounts on the settlement engines for any + // accounts that are using the default settlement engine URLs + // (This is done in case we modify the globally configured settlement + // engine URLs after accounts have already been added) - // TODO we should come up with a better way of ensuring - // the accounts are created that doesn't involve loading - // all of the accounts from the database into memory - // (even if this isn't called often, it could crash the node at some point) - store.get_all_accounts() - .map_err(|_| ApiError::internal_server_error().into()) - .and_then(move |accounts| { - let client = Client::default(); - let create_settlement_accounts = - accounts.into_iter().filter_map(move |account| { - let id = account.id(); - // Try creating the account on the settlement engine if the settlement_engine_url of the - // account is the one we just configured as the default for the account's asset code - if let Some(details) = account.settlement_engine_details() { - if Some(&details.url) == asset_to_url_map.get(account.asset_code()) { - return Some(client.create_engine_account(details.url, account.id()) - .map_err(|_| ApiError::internal_server_error().into()) - .and_then(move |status_code| { - if status_code.is_success() { - trace!("Account {} created on the SE", id); - } else { - error!("Error creating account. 
Settlement engine responded with HTTP code: {}", status_code); - } - Ok(()) - })); - } - } - None - }); - join_all(create_settlement_accounts) - }) - }) - .and_then(move |_| Ok(warp::reply::json(&asset_to_url_map_clone))) + // TODO we should come up with a better way of ensuring + // the accounts are created that doesn't involve loading + // all of the accounts from the database into memory + // (even if this isn't called often, it could crash the node at some point) + let accounts = store.get_all_accounts() + .map_err(|_| Rejection::from(ApiError::internal_server_error())).await?; + + let client = Client::default(); + // Try creating the account on the settlement engine if the settlement_engine_url of the + // account is the one we just configured as the default for the account's asset code + for account in accounts { + if let Some(details) = account.settlement_engine_details() { + if Some(&details.url) == asset_to_url_map.get(account.asset_code()) { + let status_code = client.create_engine_account(details.url, account.id()) + .map_err(|_| Rejection::from(ApiError::internal_server_error())) + .await?; + if status_code.is_success() { + trace!("Account {} created on the SE", account.id()); + } else { + error!("Error creating account. Settlement engine responded with HTTP code: {}", status_code); + } + } + } + } + Ok::(warp::reply::json(&asset_to_url_map_clone)) }) .boxed(); @@ -284,10 +286,10 @@ mod tests { use crate::routes::test_helpers::{api_call, test_node_settings_api}; use serde_json::{json, Value}; - #[test] - fn gets_status() { + #[tokio::test] + async fn gets_status() { let api = test_node_settings_api(); - let resp = api_call(&api, "GET", "/", "", None); + let resp = api_call(&api, "GET", "/", "", None).await; assert_eq!(resp.status().as_u16(), 200); assert_eq!( resp.body(), @@ -295,10 +297,10 @@ mod tests { ); } - #[test] - fn gets_rates() { + #[tokio::test] + async fn gets_rates() { let api = test_node_settings_api(); - let resp = api_call(&api, "GET", "/rates", "", None); + let resp = api_call(&api, "GET", "/rates", "", None).await; assert_eq!(resp.status().as_u16(), 200); assert_eq!( serde_json::from_slice::(resp.body()).unwrap(), @@ -306,56 +308,60 @@ mod tests { ); } - #[test] - fn gets_routes() { + #[tokio::test] + async fn gets_routes() { let api = test_node_settings_api(); - let resp = api_call(&api, "GET", "/routes", "", None); + let resp = api_call(&api, "GET", "/routes", "", None).await; assert_eq!(resp.status().as_u16(), 200); } - #[test] - fn only_admin_can_put_rates() { + #[tokio::test] + async fn only_admin_can_put_rates() { let api = test_node_settings_api(); let rates = json!({"ABC": 1.0}); - let resp = api_call(&api, "PUT", "/rates", "admin", Some(rates.clone())); + let resp = api_call(&api, "PUT", "/rates", "admin", Some(rates.clone())).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/rates", "wrong", Some(rates)); + let resp = api_call(&api, "PUT", "/rates", "wrong", Some(rates)).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_put_static_routes() { + #[tokio::test] + async fn only_admin_can_put_static_routes() { let api = test_node_settings_api(); let routes = json!({"g.node1": "alice", "example.eu": "bob"}); - let resp = api_call(&api, "PUT", "/routes/static", "admin", Some(routes.clone())); + let resp = api_call(&api, "PUT", "/routes/static", "admin", Some(routes.clone())).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/routes/static", "wrong", 
Some(routes)); + let resp = api_call(&api, "PUT", "/routes/static", "wrong", Some(routes)).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_put_single_static_route() { + #[tokio::test] + async fn only_admin_can_put_single_static_route() { let api = test_node_settings_api(); - let api_put = move |auth: &str| { - warp::test::request() - .method("PUT") - .path("/routes/static/g.node1") - .body("alice") - .header("Authorization", format!("Bearer {}", auth.to_string())) - .reply(&api) + let api_put = |auth: String| { + let auth = format!("Bearer {}", auth); + async { + warp::test::request() + .method("PUT") + .path("/routes/static/g.node1") + .body("alice") + .header("Authorization", auth) + .reply(&api) + .await + } }; - let resp = api_put("admin"); + let resp = api_put("admin".to_owned()).await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_put("wrong"); + let resp = api_put("wrong".to_owned()).await; assert_eq!(resp.status().as_u16(), 401); } - #[test] - fn only_admin_can_put_engines() { + #[tokio::test] + async fn only_admin_can_put_engines() { let api = test_node_settings_api(); let engines = json!({"ABC": "http://localhost:3000", "XYZ": "http://localhost:3001"}); let resp = api_call( @@ -364,10 +370,11 @@ mod tests { "/settlement/engines", "admin", Some(engines.clone()), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 200); - let resp = api_call(&api, "PUT", "/settlement/engines", "wrong", Some(engines)); + let resp = api_call(&api, "PUT", "/settlement/engines", "wrong", Some(engines)).await; assert_eq!(resp.status().as_u16(), 401); } } diff --git a/crates/interledger-api/src/routes/test_helpers.rs b/crates/interledger-api/src/routes/test_helpers.rs index 9e5023569..4f9eac131 100644 --- a/crates/interledger-api/src/routes/test_helpers.rs +++ b/crates/interledger-api/src/routes/test_helpers.rs @@ -2,12 +2,9 @@ use crate::{ routes::{accounts_api, node_settings_api}, AccountDetails, AccountSettings, NodeStore, }; +use async_trait::async_trait; use bytes::Bytes; -use futures::sync::mpsc::UnboundedSender; -use futures::{ - future::{err, ok}, - Future, -}; +use futures::channel::mpsc::UnboundedSender; use http::Response; use interledger_btp::{BtpAccount, BtpOutgoingService}; use interledger_ccp::{CcpRoutingAccount, RoutingRelation}; @@ -32,7 +29,7 @@ use url::Url; use uuid::Uuid; use warp::{self, Filter}; -pub fn api_call( +pub async fn api_call( api: &F, method: &str, endpoint: &str, // /ilp or /accounts/:username/ilp @@ -52,7 +49,7 @@ where ret = ret.header("Content-type", "application/json").json(&d); } - ret.reply(api) + ret.reply(api).await } pub fn test_node_settings_api( @@ -63,20 +60,20 @@ pub fn test_node_settings_api( pub fn test_accounts_api( ) -> impl warp::Filter + Clone { let incoming = incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: None, } - .build())) + .build()) }); let outgoing = outgoing_service_fn(move |_request| { - Box::new(ok(FulfillBuilder { + Ok(FulfillBuilder { fulfillment: &[0; 32], data: b"hello!", } - .build())) + .build()) }); let btp = BtpOutgoingService::new( Address::from_str("example.alice").unwrap(), @@ -174,22 +171,17 @@ impl CcpRoutingAccount for TestAccount { } } +#[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - _account_ids: Vec, - ) -> Box, Error = ()> + Send> { - Box::new(ok(vec![TestAccount])) + async fn 
get_accounts(&self, _account_ids: Vec) -> Result, ()> { + Ok(vec![TestAccount]) } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } @@ -216,91 +208,74 @@ impl RouterStore for TestStore { } } +#[async_trait] impl NodeStore for TestStore { type Account = TestAccount; - fn insert_account( - &self, - _account: AccountDetails, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + async fn insert_account(&self, _account: AccountDetails) -> Result { + Ok(TestAccount) } - fn delete_account( - &self, - _id: Uuid, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + async fn delete_account(&self, _id: Uuid) -> Result { + Ok(TestAccount) } - fn update_account( + async fn update_account( &self, _id: Uuid, _account: AccountDetails, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + ) -> Result { + Ok(TestAccount) } - fn modify_account_settings( + async fn modify_account_settings( &self, _id: Uuid, _settings: AccountSettings, - ) -> Box + Send> { - Box::new(ok(TestAccount)) + ) -> Result { + Ok(TestAccount) } - fn get_all_accounts(&self) -> Box, Error = ()> + Send> { - Box::new(ok(vec![TestAccount, TestAccount])) + async fn get_all_accounts(&self) -> Result, ()> { + Ok(vec![TestAccount, TestAccount]) } - fn set_static_routes(&self, _routes: R) -> Box + Send> + async fn set_static_routes(&self, _routes: R) -> Result<(), ()> where - R: IntoIterator, + R: IntoIterator + Send + 'async_trait, { - Box::new(ok(())) + Ok(()) } - fn set_static_route( - &self, - _prefix: String, - _account_id: Uuid, - ) -> Box + Send> { - Box::new(ok(())) + async fn set_static_route(&self, _prefix: String, _account_id: Uuid) -> Result<(), ()> { + Ok(()) } - fn set_default_route( - &self, - _account_id: Uuid, - ) -> Box + Send> { + async fn set_default_route(&self, _account_id: Uuid) -> Result<(), ()> { unimplemented!() } - fn set_settlement_engines( + async fn set_settlement_engines( &self, - _asset_to_url_map: impl IntoIterator, - ) -> Box + Send> { - Box::new(ok(())) + _asset_to_url_map: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { + Ok(()) } - fn get_asset_settlement_engine( - &self, - _asset_code: &str, - ) -> Box, Error = ()> + Send> { - Box::new(ok(None)) + async fn get_asset_settlement_engine(&self, _asset_code: &str) -> Result, ()> { + Ok(None) } } +#[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { - unimplemented!() + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { + Ok(()) } - fn clear_ilp_address(&self) -> Box + Send> { - unimplemented!() + async fn clear_ilp_address(&self) -> Result<(), ()> { + Ok(()) } /// Get's the store's ilp address from memory @@ -325,47 +300,49 @@ impl StreamNotificationsStore for TestStore { } } +#[async_trait] impl BalanceStore for TestStore { - fn get_balance(&self, _account: TestAccount) -> Box + Send> { - Box::new(ok(1)) + async fn get_balance(&self, _account: TestAccount) -> Result { + Ok(1) } - fn update_balances_for_prepare( + async fn update_balances_for_prepare( &self, _from_account: TestAccount, _incoming_amount: u64, - ) -> Box + Send> { + ) -> Result<(), ()> { unimplemented!() } - fn update_balances_for_fulfill( + async fn update_balances_for_fulfill( &self, _to_account: TestAccount, 
_outgoing_amount: u64, - ) -> Box + Send> { + ) -> Result<(i64, u64), ()> { unimplemented!() } - fn update_balances_for_reject( + async fn update_balances_for_reject( &self, _from_account: TestAccount, _incoming_amount: u64, - ) -> Box + Send> { + ) -> Result<(), ()> { unimplemented!() } } +#[async_trait] impl HttpStore for TestStore { type Account = TestAccount; - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { if username == &*USERNAME && token == AUTH_PASSWORD { - Box::new(ok(TestAccount)) + Ok(TestAccount) } else { - Box::new(err(())) + Err(()) } } } diff --git a/crates/interledger-btp/Cargo.toml b/crates/interledger-btp/Cargo.toml index 446750055..0d086a1f3 100644 --- a/crates/interledger-btp/Cargo.toml +++ b/crates/interledger-btp/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/interledger-rs/interledger-rs" bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } chrono = { version = "0.4.9", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } @@ -19,18 +19,21 @@ num-bigint = { version = "0.2.3", default-features = false, features = ["std"] } parking_lot = { version = "0.9.0", default-features = false } quick-error = { version = "1.2.2", default-features = false } rand = { version = "0.7.2", default-features = false, features = ["std"] } -stream-cancel = { version = "0.4.4", default-features = false } -tokio-executor = { version = "0.1.8", default-features = false } -tokio-timer = { version = "0.2.11", default-features = false } -tokio-tungstenite = { version = "0.9.0", default-features = false, features = ["tls", "connect"] } -tungstenite = { version = "0.9.1", default-features = false } -url = { version = "2.1.0", default-features = false } +stream-cancel = { version = "0.5", default-features = false } +tokio-tungstenite = { version = "0.10.0", package = "tokio-tungstenite", git = "https://github.com/snapview/tokio-tungstenite", default-features = false, features = ["tls", "connect"] } + +tungstenite = { version = "0.9.2", default-features = false } +# we must force url v2.1.0 because stripping the "btp+" prefix from a BTP URL +# is an operation which panics +url = { version = "=2.1.0", default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"]} -warp = { version = "0.1.20", default-features = false, features = ["websocket"] } -secrecy = "0.5.1" +warp = { version = "0.2", default-features = false, features = ["websocket"] } +secrecy = "0.6" +async-trait = "0.1.22" +tokio = { version = "0.2.8", features = ["rt-core", "time", "stream", "macros"] } +lazy_static = { version = "1.4.0", default-features = false } +pin-project = "0.4.6" [dev-dependencies] hex = { version = "0.4.0", default-features = false } -lazy_static = { version = "1.4.0", default-features = false } -net2 = { version = "0.2.33", default-features = false } -tokio = { version = "0.1.22", default-features = false } +net2 = { version = "0.2.33", default-features = false } \ No newline at end of file diff --git a/crates/interledger-btp/src/client.rs 
b/crates/interledger-btp/src/client.rs index 7a0041e6e..027809e60 100644 --- a/crates/interledger-btp/src/client.rs +++ b/crates/interledger-btp/src/client.rs @@ -1,36 +1,26 @@ use super::packet::*; use super::service::BtpOutgoingService; use super::BtpAccount; -use futures::{future::join_all, Future, Sink, Stream}; +use futures::{future::join_all, SinkExt, StreamExt, TryFutureExt}; use interledger_packet::Address; use interledger_service::*; use log::{debug, error, trace}; use rand::random; use tokio_tungstenite::connect_async; use tungstenite::Message; -use url::{ParseError, Url}; - -pub fn parse_btp_url(uri: &str) -> Result { - let uri = if uri.starts_with("btp+") { - uri.split_at(4).1 - } else { - uri - }; - Url::parse(uri) -} /// Create a BtpOutgoingService wrapping BTP connections to the accounts specified. /// Calling `handle_incoming` with an `IncomingService` will turn the returned /// BtpOutgoingService into a bidirectional handler. -pub fn connect_client( +pub async fn connect_client( ilp_address: Address, accounts: Vec, error_on_unavailable: bool, next_outgoing: S, -) -> impl Future, Error = ()> +) -> Result, ()> where S: OutgoingService + Clone + 'static, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { let service = BtpOutgoingService::new(ilp_address, next_outgoing); let mut connect_btp = Vec::new(); @@ -42,17 +32,23 @@ where service.clone(), )); } - join_all(connect_btp).and_then(move |_| Ok(service)) + join_all(connect_btp).await; + Ok(service) } -pub fn connect_to_service_account( +/// Initiates a BTP connection with the specified account and saves it to the list of connections +/// maintained by the provided service. This is done in the following steps: +/// 1. Initialize a WebSocket connection at the BTP account's URL +/// 2. Send a BTP authorization packet to the peer +/// 3. 
If successful, consider the BTP connection established and add it to the service +pub async fn connect_to_service_account( account: A, error_on_unavailable: bool, service: BtpOutgoingService, -) -> impl Future +) -> Result<(), ()> where O: OutgoingService + Clone + 'static, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { let account_id = account.id(); let mut url = account @@ -67,58 +63,62 @@ where .map(|s| s.to_vec()) .unwrap_or_default(); debug!("Connecting to {}", url); - connect_async(url.clone()) + + let (mut connection, _) = connect_async(url.clone()) .map_err(move |err| { error!( "Error connecting to WebSocket server for account: {} {:?}", account_id, err ) }) - .and_then(move |(connection, _)| { - trace!( - "Connected to account {} (URI: {}), sending auth packet", - account_id, - url - ); - // Send BTP authentication - let auth_packet = Message::Binary( - BtpPacket::Message(BtpMessage { - request_id: random(), - protocol_data: vec![ - ProtocolData { - protocol_name: String::from("auth"), - content_type: ContentType::ApplicationOctetStream, - data: vec![], - }, - ProtocolData { - protocol_name: String::from("auth_token"), - content_type: ContentType::TextPlainUtf8, - data: token, - }, - ], - }) - .to_bytes(), - ); + .await?; + + trace!( + "Connected to account {} (UID: {}) (URI: {}), sending auth packet", + account.username(), + account_id, + url + ); - // TODO check that the response is a success before proceeding - // (right now we just assume they'll close the connection if the auth didn't work) - connection - .send(auth_packet) - .map_err(move |_| error!("Error sending auth packet on connection: {}", url)) - .then(move |result| match result { - Ok(connection) => { - debug!("Connected to account {}'s server", account.id()); - let connection = connection.from_err().sink_from_err(); - service.add_connection(account, connection); - Ok(()) - } - Err(_) => { - if error_on_unavailable { - Err(()) - } else { - Ok(()) - } - } - }) + // Send BTP authentication + let auth_packet = Message::binary( + BtpPacket::Message(BtpMessage { + request_id: random(), + protocol_data: vec![ + ProtocolData { + protocol_name: String::from("auth"), + content_type: ContentType::ApplicationOctetStream, + data: vec![], + }, + ProtocolData { + protocol_name: String::from("auth_token"), + content_type: ContentType::TextPlainUtf8, + data: token, + }, + ], }) + .to_bytes(), + ); + + // (right now we just assume they'll close the connection if the auth didn't work) + let result = connection // this just a stream + .send(auth_packet) + .map_err(move |_| error!("Error sending auth packet on connection: {}", url)) + .await; + + match result { + Ok(_) => { + debug!("Connected to account {}'s server", account.id()); + let connection = connection.filter_map(|v| async move { v.ok() }); + service.add_connection(account, connection); + Ok(()) + } + Err(_) => { + if error_on_unavailable { + Err(()) + } else { + Ok(()) + } + } + } } diff --git a/crates/interledger-btp/src/lib.rs b/crates/interledger-btp/src/lib.rs index fbc6981e4..623219055 100644 --- a/crates/interledger-btp/src/lib.rs +++ b/crates/interledger-btp/src/lib.rs @@ -6,7 +6,7 @@ //! Because this protocol uses WebSockets, only one party needs to have a publicly-accessible HTTPS //! endpoint but both sides can send and receive ILP packets. 
-use futures::Future; +use async_trait::async_trait; use interledger_service::{Account, Username}; use url::Url; @@ -16,37 +16,40 @@ mod oer; mod packet; mod server; mod service; +mod wrapped_ws; -pub use self::client::{connect_client, connect_to_service_account, parse_btp_url}; -pub use self::server::btp_service_as_filter; +pub use self::client::{connect_client, connect_to_service_account}; +pub use self::server::btp_service_as_filter; // This is consumed only by the node. pub use self::service::{BtpOutgoingService, BtpService}; +/// Extension trait for [Account](../interledger_service/trait.Account.html) with [ILP over BTP](https://interledger.org/rfcs/0023-bilateral-transfer-protocol/) related information pub trait BtpAccount: Account { + /// Returns the BTP Websockets URL corresponding to this account fn get_ilp_over_btp_url(&self) -> Option<&Url>; + /// Returns the BTP authentication token which is used when initiating a BTP connection + /// with a peer fn get_ilp_over_btp_outgoing_token(&self) -> Option<&[u8]>; } /// The interface for Store implementations that can be used with the BTP Server. +#[async_trait] pub trait BtpStore { type Account: BtpAccount; /// Load Account details based on the auth token received via BTP. - fn get_account_from_btp_auth( + async fn get_account_from_btp_auth( &self, username: &Username, token: &str, - ) -> Box + Send>; + ) -> Result; /// Load accounts that have a ilp_over_btp_url configured - fn get_btp_outgoing_accounts( - &self, - ) -> Box, Error = ()> + Send>; + async fn get_btp_outgoing_accounts(&self) -> Result, ()>; } #[cfg(test)] mod client_server { use super::*; - use futures::future::{err, lazy, ok, result}; use interledger_packet::{Address, ErrorCode, FulfillBuilder, PrepareBuilder, RejectBuilder}; use interledger_service::*; use net2::TcpBuilder; @@ -56,7 +59,6 @@ mod client_server { sync::Arc, time::{Duration, SystemTime}, }; - use tokio::runtime::Runtime; use uuid::Uuid; use lazy_static::lazy_static; @@ -126,13 +128,11 @@ mod client_server { accounts: Arc>, } + #[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { let accounts: Vec = self .accounts .iter() @@ -145,156 +145,149 @@ mod client_server { }) .collect(); if accounts.len() == account_ids.len() { - Box::new(ok(accounts)) + Ok(accounts) } else { - Box::new(err(())) + Err(()) } } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } + #[async_trait] impl BtpStore for TestStore { type Account = TestAccount; - fn get_account_from_btp_auth( + async fn get_account_from_btp_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { - Box::new(result( - self.accounts - .iter() - .find(|account| { - if let Some(account_token) = &account.ilp_over_btp_incoming_token { - account_token == token && account.username() == username - } else { - false - } - }) - .cloned() - .ok_or(()), - )) + ) -> Result { + self.accounts + .iter() + .find(|account| { + if let Some(account_token) = &account.ilp_over_btp_incoming_token { + account_token == token && account.username() == username + } else { + false + } + }) + .cloned() + .ok_or(()) } - fn get_btp_outgoing_accounts( - &self, - ) -> Box, Error = ()> + Send> { - 
Box::new(ok(self + async fn get_btp_outgoing_accounts(&self) -> Result, ()> { + Ok(self .accounts .iter() .filter(|account| account.ilp_over_btp_url.is_some()) .cloned() - .collect())) + .collect()) } } // TODO should this be an integration test, since it binds to a port? - #[test] - fn client_server_test() { - let mut runtime = Runtime::new().unwrap(); - runtime - .block_on(lazy(|| { - let bind_addr = get_open_port(); - - let server_store = TestStore { - accounts: Arc::new(vec![TestAccount { - id: Uuid::new_v4(), - ilp_over_btp_incoming_token: Some("test_auth_token".to_string()), - ilp_over_btp_outgoing_token: None, - ilp_over_btp_url: None, - }]), - }; - let server_address = Address::from_str("example.server").unwrap(); - let btp_service = BtpOutgoingService::new( - server_address.clone(), - outgoing_service_fn(move |_| { - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: b"No other outgoing handler", - triggered_by: Some(&server_address), - data: &[], - } - .build()) - }), - ); - let filter = btp_service_as_filter(btp_service.clone(), server_store); - btp_service.handle_incoming(incoming_service_fn(|_| { - Ok(FulfillBuilder { - fulfillment: &[0; 32], - data: b"test data", - } - .build()) - })); - - let account = TestAccount { - id: Uuid::new_v4(), - ilp_over_btp_url: Some( - Url::parse(&format!("btp+ws://{}/accounts/alice/ilp/btp", bind_addr)) - .unwrap(), - ), - ilp_over_btp_outgoing_token: Some("test_auth_token".to_string()), - ilp_over_btp_incoming_token: None, - }; - let accounts = vec![account.clone()]; - let addr = Address::from_str("example.address").unwrap(); - let addr_clone = addr.clone(); - let client = connect_client( - addr.clone(), - accounts, - true, - outgoing_service_fn(move |_| { - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &[], - data: &[], - triggered_by: Some(&addr_clone), - } - .build()) - }), - ) - .and_then(move |btp_service| { - let mut btp_service = - btp_service.handle_incoming(incoming_service_fn(move |_| { - Err(RejectBuilder { - code: ErrorCode::F02_UNREACHABLE, - message: &[], - data: &[], - triggered_by: Some(&addr), - } - .build()) - })); - let btp_service_clone = btp_service.clone(); - btp_service - .send_request(OutgoingRequest { - from: account.clone(), - to: account.clone(), - original_amount: 100, - prepare: PrepareBuilder { - destination: Address::from_str("example.destination").unwrap(), - amount: 100, - execution_condition: &[0; 32], - expires_at: SystemTime::now() + Duration::from_secs(30), - data: b"test data", - } - .build(), - }) - .map_err(|reject| println!("Packet was rejected: {:?}", reject)) - .and_then(move |_| { - btp_service_clone.close(); - Ok(()) - }) - }); - let server = warp::serve(filter); - tokio::spawn(server.bind(bind_addr)); - client + #[tokio::test] + async fn client_server_test() { + let bind_addr = get_open_port(); + + let server_store = TestStore { + accounts: Arc::new(vec![TestAccount { + id: Uuid::new_v4(), + ilp_over_btp_incoming_token: Some("test_auth_token".to_string()), + ilp_over_btp_outgoing_token: None, + ilp_over_btp_url: None, + }]), + }; + let server_address = Address::from_str("example.server").unwrap(); + let btp_service = BtpOutgoingService::new( + server_address.clone(), + outgoing_service_fn(move |_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: b"No other outgoing handler", + triggered_by: Some(&server_address), + data: &[], + } + .build()) + }), + ); + btp_service + .clone() + .handle_incoming(incoming_service_fn(|_| { + Ok(FulfillBuilder 
{ + fulfillment: &[0; 32], + data: b"test data", + } + .build()) + })) + .await; + let filter = btp_service_as_filter(btp_service.clone(), server_store); + let server = warp::serve(filter); + // Spawn the server and listen for incoming connections + tokio::spawn(server.bind(bind_addr)); + + // Try to connect + let account = TestAccount { + id: Uuid::new_v4(), + ilp_over_btp_url: Some( + Url::parse(&format!("btp+ws://{}/accounts/alice/ilp/btp", bind_addr)).unwrap(), + ), + ilp_over_btp_outgoing_token: Some("test_auth_token".to_string()), + ilp_over_btp_incoming_token: None, + }; + let accounts = vec![account.clone()]; + let addr = Address::from_str("example.address").unwrap(); + let addr_clone = addr.clone(); + + let btp_client = connect_client( + addr.clone(), + accounts, + true, + outgoing_service_fn(move |_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &[], + data: &[], + triggered_by: Some(&addr_clone), + } + .build()) + }), + ) + .await + .unwrap(); + + let mut btp_client = btp_client + .handle_incoming(incoming_service_fn(move |_| { + Err(RejectBuilder { + code: ErrorCode::F02_UNREACHABLE, + message: &[], + data: &[], + triggered_by: Some(&addr), + } + .build()) })) - .unwrap(); + .await; + + let res = btp_client + .send_request(OutgoingRequest { + from: account.clone(), + to: account.clone(), + original_amount: 100, + prepare: PrepareBuilder { + destination: Address::from_str("example.destination").unwrap(), + amount: 100, + execution_condition: &[0; 32], + expires_at: SystemTime::now() + Duration::from_secs(30), + data: b"test data", + } + .build(), + }) + .await; + assert!(res.is_ok()); + btp_service.close(); } } diff --git a/crates/interledger-btp/src/server.rs b/crates/interledger-btp/src/server.rs index 2933316d4..e171ed545 100644 --- a/crates/interledger-btp/src/server.rs +++ b/crates/interledger-btp/src/server.rs @@ -1,32 +1,23 @@ -use super::service::{BtpOutgoingService, WsError}; use super::{packet::*, BtpAccount, BtpStore}; -use futures::{future::result, Async, AsyncSink, Future, Poll, Sink, Stream}; +use super::{service::BtpOutgoingService, wrapped_ws::WsWrap}; +use futures::{FutureExt, Sink, Stream}; +use futures::{SinkExt, StreamExt, TryFutureExt}; use interledger_service::*; use log::{debug, error, warn}; use secrecy::{ExposeSecret, SecretString}; use std::time::Duration; -use tokio_timer::Timeout; -use tungstenite; use warp::{ self, - ws::{Message, WebSocket, Ws2}, + ws::{Message, WebSocket, Ws}, Filter, }; // Close the incoming websocket connection if the auth details // have not been received within this timeout const WEBSOCKET_TIMEOUT: Duration = Duration::from_secs(10); +const MAX_MESSAGE_SIZE: usize = 40000; -// const MAX_MESSAGE_SIZE: usize = 40000; - -/// Returns a BtpOutgoingService and a warp Filter. -/// -/// The BtpOutgoingService wraps all BTP/WebSocket connections that come -/// in on the given address. Calling `handle_incoming` with an `IncomingService` will -/// turn the returned BtpOutgoingService into a bidirectional handler. -/// The separation is designed to enable the returned BtpOutgoingService to be passed -/// to another service like the Router, and _then_ for the Router to be passed as the -/// IncomingService to the BTP server. +/// Returns a Warp Filter instantiated for the provided BtpOutgoingService service. /// /// The warp filter handles the websocket upgrades and adds incoming connections /// to the BTP service so that it will handle each of the messages. 
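For reference, a minimal usage sketch of the boxed filter returned by `btp_service_as_filter`, assuming a `btp_service`, a `server_store` and a `bind_addr` are already in scope; this mirrors what `client_server_test` above does rather than prescribing an API:

    // Hedged sketch: serve the BTP warp filter on the tokio 0.2 runtime.
    let filter = btp_service_as_filter(btp_service.clone(), server_store.clone());
    let server = warp::serve(filter);
    // Spawning lets the current task keep running (e.g. to connect BTP clients).
    tokio::spawn(server.bind(bind_addr));
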
@@ -37,35 +28,24 @@ pub fn btp_service_as_filter( where O: OutgoingService + Clone + Send + Sync + 'static, S: BtpStore + Clone + Send + Sync + 'static, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { warp::path("accounts") - .and(warp::path::param2::()) + .and(warp::path::param::()) .and(warp::path("ilp")) .and(warp::path("btp")) .and(warp::path::end()) - .and(warp::ws2()) - .map(move |username: Username, ws: Ws2| { - let store = store.clone(); + .and(warp::ws()) + .map(move |username: Username, ws: Ws| { + // warp Websocket let service_clone = service.clone(); - ws.on_upgrade(move |ws: WebSocket| { - // TODO set max_message_size once https://github.com/seanmonstar/warp/pull/272 is merged - let service_clone = service_clone.clone(); - Timeout::new(validate_auth(store, username, ws), WEBSOCKET_TIMEOUT) - .and_then(move |(account, connection)| { - debug!( - "Added connection for account {}: (id: {})", - account.username(), - account.id() - ); - service_clone.add_connection(account, WsWrap { connection }); - Ok(()) - }) - .or_else(|_| { - warn!("Closing Websocket connection because of an error"); - Ok(()) - }) - }) + let store_clone = store.clone(); + ws.max_message_size(MAX_MESSAGE_SIZE) + .on_upgrade(|socket: WebSocket| { + // wrapper over tungstenite Websocket + add_connections(socket, username, service_clone, store_clone) + .map(|result| result.unwrap()) + }) }) .boxed() } @@ -74,93 +54,46 @@ where /// tungstenite Websocket connection. It is needed for /// compatibility with the BTP service that interacts with the /// websocket implementation from warp and tokio-tungstenite -struct WsWrap { - connection: W, -} - -impl Stream for WsWrap -where - W: Stream - + Sink, -{ - type Item = tungstenite::Message; - type Error = WsError; - - fn poll(&mut self) -> Poll, Self::Error> { - match self.connection.poll() { - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::Ready(Some(message))) => { - let message = if message.is_ping() { - tungstenite::Message::Ping(message.into_bytes()) - } else if message.is_binary() { - tungstenite::Message::Binary(message.into_bytes()) - } else if message.is_text() { - tungstenite::Message::Text(message.to_str().unwrap_or_default().to_string()) - } else if message.is_close() { - tungstenite::Message::Close(None) - } else { - warn!( - "Got unexpected websocket message, closing connection: {:?}", - message - ); - tungstenite::Message::Close(None) - }; - Ok(Async::Ready(Some(message))) - } - Err(err) => Err(WsError::from(err)), - } - } -} - -impl Sink for WsWrap +async fn add_connections( + socket: WebSocket, + username: Username, + service: BtpOutgoingService, + store: S, +) -> Result<(), ()> where - W: Stream - + Sink, + O: OutgoingService + Clone + Send + Sync + 'static, + S: BtpStore + Clone + Send + Sync + 'static, + A: BtpAccount + Send + Sync + 'static, { - type SinkItem = tungstenite::Message; - type SinkError = WsError; - - fn start_send( - &mut self, - item: Self::SinkItem, - ) -> Result, Self::SinkError> { - match item { - tungstenite::Message::Binary(data) => self - .connection - .start_send(Message::binary(data)) - .map(|result| { - if let AsyncSink::NotReady(message) = result { - AsyncSink::NotReady(tungstenite::Message::Binary(message.into_bytes())) - } else { - AsyncSink::Ready - } - }) - .map_err(WsError::from), - tungstenite::Message::Text(data) => { - match self.connection.start_send(Message::text(data)) { - Ok(AsyncSink::NotReady(message)) => { - if let Ok(string) = 
String::from_utf8(message.into_bytes()) { - Ok(AsyncSink::NotReady(tungstenite::Message::text(string))) - } else { - Err(WsError::Tungstenite(tungstenite::Error::Utf8)) - } - } - Ok(AsyncSink::Ready) => Ok(AsyncSink::Ready), - Err(err) => Err(WsError::from(err)), + // We ignore all the errors + let socket = socket.filter_map(|v| async move { v.ok() }); + let (account, connection) = + match tokio::time::timeout(WEBSOCKET_TIMEOUT, validate_auth(store, username, socket)).await + { + Ok(res) => match res { + Ok(res) => res, + Err(_) => { + warn!("Closing Websocket connection because of invalid credentials"); + return Ok(()); } + }, + Err(_) => { + warn!("Closing Websocket connection because of an error"); + return Ok(()); } - // Ignore other message types because warp's WebSocket type doesn't - // allow us to send any other types of messages - // TODO make sure warp's websocket responds to pings and/or sends them to keep the - // connection alive - _ => Ok(AsyncSink::Ready), - } - } + }; - fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { - self.connection.poll_complete().map_err(WsError::from) - } + // We need to wrap our Warp connection in order to cast the Sink type + // to tungstenite::Message. This probably can be implemented with SinkExt::with + // but couldn't figure out how. + service.add_connection(account.clone(), WsWrap { connection }); + debug!( + "Added connection for account {}: (id: {})", + account.username(), + account.id() + ); + + Ok(()) } struct Auth { @@ -168,74 +101,61 @@ struct Auth { token: SecretString, } -fn validate_auth( +async fn validate_auth( store: S, username: Username, - connection: impl Stream - + Sink, -) -> impl Future< - Item = ( - A, - impl Stream - + Sink, - ), - Error = (), -> + connection: impl Stream + Sink, +) -> Result<(A, impl Stream + Sink), ()> where S: BtpStore + 'static, A: BtpAccount + 'static, { - get_auth(connection).and_then(move |(auth, connection)| { - debug!("Got BTP connection for username: {}", username); - store - .get_account_from_btp_auth(&username, &auth.token.expose_secret()) - .map_err(move |_| warn!("BTP connection does not correspond to an account")) - .and_then(move |account| { - let auth_response = Message::binary( - BtpResponse { - request_id: auth.request_id, - protocol_data: Vec::new(), - } - .to_bytes(), - ); - connection - .send(auth_response) - .map_err(|_err| error!("warp::Error sending auth response")) - .and_then(|connection| Ok((account, connection))) - }) - }) -} + let (auth, mut connection) = get_auth(Box::pin(connection)).await?; + debug!("Got BTP connection for username: {}", username); + let account = store + .get_account_from_btp_auth(&username, &auth.token.expose_secret()) + .map_err(move |_| warn!("BTP connection does not correspond to an account")) + .await?; + + let auth_response = Message::binary( + BtpResponse { + request_id: auth.request_id, + protocol_data: Vec::new(), + } + .to_bytes(), + ); -fn get_auth( - connection: impl Stream - + Sink, -) -> impl Future< - Item = ( - Auth, - impl Stream - + Sink, - ), - Error = (), -> { connection - .skip_while(|message| { - // Skip non-binary messages like Pings and Pongs - // Note that the BTP protocol spec technically specifies that - // the auth message MUST be the first packet sent over the - // WebSocket connection. However, the JavaScript implementation - // of BTP sends a Ping packet first, so we should ignore it. 
- // (Be liberal in what you accept but strict in what you send) - Ok(!message.is_binary()) - // TODO: should we error if the client sends something other than a binary or ping packet first? - }) - .into_future() - .map_err(|_err| ()) - .and_then(move |(message, connection)| { - // The first packet sent on the connection MUST be the auth packet - result(parse_auth(message).map(|auth| (auth, connection)).ok_or_else(|| { - warn!("Got a BTP connection where the first packet sent was not a valid BTP Auth message. Closing the connection") - })) - }) + .send(auth_response) + .map_err(|_| error!("warp::Error sending auth response")) + .await?; + + Ok((account, connection)) +} + +/// Reads the first non-empty non-error binary message from the WebSocket and attempts to parse it as an AuthToken +async fn get_auth( + connection: impl Stream + Sink + Unpin, +) -> Result<(Auth, impl Stream + Sink), ()> { + // Skip non-binary messages like Pings and Pongs + // Note that the BTP protocol spec technically specifies that + // the auth message MUST be the first packet sent over the + // WebSocket connection. However, the JavaScript implementation + // of BTP sends a Ping packet first, so we should ignore it. + // (Be liberal in what you accept but strict in what you send) + // TODO: should we error if the client sends something other than a binary or ping packet first? + let mut connection = + connection.skip_while(move |message| futures::future::ready(!message.is_binary())); + + // The first packet sent on the connection MUST be the auth packet + let message = connection.next().await; + match parse_auth(message) { + Some(auth) => Ok((auth, Box::pin(connection))), + None => { + warn!("Got a BTP connection where the first packet sent was not a valid BTP Auth message. 
Closing the connection"); + Err(()) + } + } } fn parse_auth(ws_packet: Option) -> Option { @@ -245,6 +165,9 @@ fn parse_auth(ws_packet: Option) -> Option { Ok(message) => { let request_id = message.request_id; let mut token: Option = None; + // The primary data should be the "auth" with empty data + // The secondary data MUST have the "auth_token" with the authorization + // token set as the data field for protocol_data in message.protocol_data.iter() { let protocol_name: &str = protocol_data.protocol_name.as_ref(); if protocol_name == "auth_token" { @@ -255,7 +178,7 @@ fn parse_auth(ws_packet: Option) -> Option { if let Some(token) = token { return Some(Auth { request_id, - token: SecretString::new(token.to_string()), + token: SecretString::new(token), }); } else { warn!("BTP packet is missing auth token"); diff --git a/crates/interledger-btp/src/service.rs b/crates/interledger-btp/src/service.rs index 88f0395ad..51643d958 100644 --- a/crates/interledger-btp/src/service.rs +++ b/crates/interledger-btp/src/service.rs @@ -1,68 +1,48 @@ use super::{packet::*, BtpAccount}; +use async_trait::async_trait; use bytes::BytesMut; use futures::{ - future::err, - sync::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, - sync::oneshot, - Future, Sink, Stream, + channel::{ + mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, + oneshot, + }, + future, FutureExt, Sink, Stream, StreamExt, }; use interledger_packet::{Address, ErrorCode, Fulfill, Packet, Prepare, Reject, RejectBuilder}; use interledger_service::*; +use lazy_static::lazy_static; use log::{debug, error, trace, warn}; use parking_lot::{Mutex, RwLock}; use rand::random; use std::collections::HashMap; -use std::{ - convert::TryFrom, error::Error, fmt, io, iter::IntoIterator, marker::PhantomData, sync::Arc, - time::Duration, -}; +use std::{convert::TryFrom, iter::IntoIterator, marker::PhantomData, sync::Arc, time::Duration}; use stream_cancel::{Trigger, Valve}; -use tokio_executor::spawn; -use tokio_timer::Interval; +use tokio::time; use tungstenite::Message; use uuid::Uuid; -use warp; const PING_INTERVAL: u64 = 30; // seconds -type IlpResultChannel = oneshot::Sender>; -type IncomingRequestBuffer = UnboundedReceiver<(A, u32, Prepare)>; - -#[derive(Debug)] -pub enum WsError { - Tungstenite(tungstenite::Error), - Warp(warp::Error), -} - -impl fmt::Display for WsError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - WsError::Tungstenite(err) => err.fmt(f), - WsError::Warp(err) => err.fmt(f), - } - } -} - -impl Error for WsError {} - -impl From for WsError { - fn from(err: tungstenite::Error) -> Self { - WsError::Tungstenite(err) - } +lazy_static! { + static ref PING: Message = Message::Ping(Vec::with_capacity(0)); + static ref PONG: Message = Message::Pong(Vec::with_capacity(0)); } -impl From for WsError { - fn from(err: warp::Error) -> Self { - WsError::Warp(err) - } -} +type IlpResultChannel = oneshot::Sender>; +type IncomingRequestBuffer = UnboundedReceiver<(A, u32, Prepare)>; -/// A container for BTP/WebSocket connections that implements OutgoingService -/// for sending outgoing ILP Prepare packets over one of the connected BTP connections. +/// The BtpOutgoingService wraps all BTP/WebSocket connections that come +/// in on the given address. It implements OutgoingService for sending +/// outgoing ILP Prepare packets over one of the connected BTP connections. +/// Calling `handle_incoming` with an `IncomingService` will turn the returned +/// BtpOutgoingService into a bidirectional handler. 
+/// The separation is designed to enable the returned BtpOutgoingService to be passed +/// to another service like the Router, and _then_ for the Router to be passed as the +/// IncomingService to the BTP server. #[derive(Clone)] pub struct BtpOutgoingService { - // TODO support multiple connections per account ilp_address: Address, + /// Outgoing messages for the receiver of the websocket indexed by account uid connections: Arc>>>, pending_outgoing: Arc>>, pending_incoming: Arc>>>, @@ -72,10 +52,76 @@ pub struct BtpOutgoingService { stream_valve: Arc, } +/// Handle the packets based on whether they are an incoming request or a response to something we sent. +/// a. If it's a Prepare packet, it gets buffered in the incoming_sender channel which will get consumed +/// once an incoming handler is added +/// b. If it's a Fulfill/Reject packet, it gets added to the pending_outgoing hashmap which gets consumed +/// by the outgoing service implementation immediately +/// incoming_sender.unbounded_send basically sends data to the self.incoming_receiver +/// to be consumed when we setup the incoming handler +/// Set up a listener to handle incoming packets from the WebSocket connection +#[inline] +async fn handle_message( + message: Message, + tx_clone: UnboundedSender, + account: A, + pending_requests: Arc>>, + incoming_sender: UnboundedSender<(A, u32, Prepare)>, +) { + if message.is_binary() { + match parse_ilp_packet(message) { + // Queues up the prepare packet + Ok((request_id, Packet::Prepare(prepare))) => { + trace!( + "Got incoming Prepare packet on request ID: {} {:?}", + request_id, + prepare + ); + let _ = incoming_sender + .unbounded_send((account, request_id, prepare)) + .map_err(|err| error!("Unable to buffer incoming request: {:?}", err)); + } + // Sends the fulfill/reject to the outgoing service + Ok((request_id, Packet::Fulfill(fulfill))) => { + trace!("Got fulfill response to request id {}", request_id); + if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { + let _ = channel.send(Ok(fulfill)).map_err(|fulfill| error!("Error forwarding Fulfill packet back to the Future that sent the Prepare: {:?}", fulfill)); + } else { + warn!( + "Got Fulfill packet that does not match an outgoing Prepare we sent: {:?}", + fulfill + ); + } + } + Ok((request_id, Packet::Reject(reject))) => { + trace!("Got reject response to request id {}", request_id); + if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { + let _ = channel.send(Err(reject)).map_err(|reject| error!("Error forwarding Reject packet back to the Future that sent the Prepare: {:?}", reject)); + } else { + warn!( + "Got Reject packet that does not match an outgoing Prepare we sent: {:?}", + reject + ); + } + } + Err(_) => { + debug!("Unable to parse ILP packet from BTP packet (if this is the first time this appears, the packet was probably the auth response)"); + // TODO Send error back + } + } + } else if message.is_ping() { + trace!("Responding to Ping message from account {}", account.id()); + // Writes back the PONG to the websocket + let _ = tx_clone + .unbounded_send(PONG.clone()) + .map_err(|err| error!("Error sending Pong message back: {:?}", err)); + } +} + impl BtpOutgoingService where O: OutgoingService + Clone, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { pub fn new(ilp_address: Address, next: O) -> Self { let (incoming_sender, incoming_receiver) = unbounded(); @@ -100,133 +146,88 @@ where self.close_all_connections.lock().take(); } - /// Set up a WebSocket 
connection so that outgoing Prepare packets can be sent to it, - /// incoming Prepare packets are buffered in a channel (until an IncomingService is added - /// via the handle_incoming method), and ILP Fulfill and Reject packets will be - /// sent back to the Future that sent the outgoing request originally. + // Set up a WebSocket connection so that outgoing Prepare packets can be sent to it, + // incoming Prepare packets are buffered in a channel (until an IncomingService is added + // via the handle_incoming method), and ILP Fulfill and Reject packets will be + // sent back to the Future that sent the outgoing request originally. pub(crate) fn add_connection( &self, account: A, - connection: impl Stream - + Sink - + Send - + 'static, + ws_stream: impl Stream + Sink + Send + 'static, ) { let account_id = account.id(); - // Set up a channel to forward outgoing packets to the WebSocket connection - let (tx, rx) = unbounded(); - let (sink, stream) = connection.split(); + let (client_tx, client_rx) = unbounded(); + let (write, read) = ws_stream.split(); let (close_connection, valve) = Valve::new(); - let stream = valve.wrap(stream); - let stream = self.stream_valve.wrap(stream); - let forward_to_connection = sink - .send_all(rx.map_err(|_err| { - WsError::Tungstenite(io::Error::from(io::ErrorKind::ConnectionAborted).into()) - })) - .then(move |_| { + + // tx -> rx -> write -> our peer + // Responsible mainly for responding to Pings + let write_to_ws = client_rx.map(Ok).forward(write).then(move |_| { + async move { debug!( "Finished forwarding to WebSocket stream for account: {}", account_id ); + // When this is dropped, the read valve will close drop(close_connection); - Ok(()) - }); + Ok::<(), ()>(()) + } + }); + tokio::spawn(write_to_ws); - // Send pings every PING_INTERVAL until the connection closes or the Service is dropped - let tx_clone = tx.clone(); - let send_pings = valve - .wrap( - self.stream_valve - .wrap(Interval::new_interval(Duration::from_secs(PING_INTERVAL))), + // Process incoming messages depending on their type + let pending_outgoing = self.pending_outgoing.clone(); + let incoming_sender = self.incoming_sender.clone(); + let client_tx_clone = client_tx.clone(); + let handle_message_fn = move |msg: Message| { + handle_message( + msg, + client_tx_clone.clone(), + account.clone(), + pending_outgoing.clone(), + incoming_sender.clone(), ) - .map_err(|err| { - warn!("Timer error on Ping interval: {:?}", err); - }) - .for_each(move |_| { - if let Err(err) = tx_clone.unbounded_send(Message::Ping(Vec::with_capacity(0))) { - warn!( - "Error sending Ping on connection to account {}: {:?}", - account_id, err - ); - } - Ok(()) - }); - spawn(send_pings); + }; - // Set up a listener to handle incoming packets from the WebSocket connection - // TODO do we need all this cloning? 
- let pending_requests = self.pending_outgoing.clone(); - let incoming_sender = self.incoming_sender.clone(); - let tx_clone = tx.clone(); - let handle_incoming = stream.map_err(move |err| error!("Error reading from WebSocket stream for account {}: {:?}", account_id, err)).for_each(move |message| { - // Handle the packets based on whether they are an incoming request or a response to something we sent - if message.is_binary() { - match parse_ilp_packet(message) { - Ok((request_id, Packet::Prepare(prepare))) => { - trace!("Got incoming Prepare packet on request ID: {} {:?}", request_id, prepare); - incoming_sender.clone().unbounded_send((account.clone(), request_id, prepare)) - .map_err(|err| error!("Unable to buffer incoming request: {:?}", err)) - }, - Ok((request_id, Packet::Fulfill(fulfill))) => { - trace!("Got fulfill response to request id {}", request_id); - if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { - channel.send(Ok(fulfill)).map_err(|fulfill| error!("Error forwarding Fulfill packet back to the Future that sent the Prepare: {:?}", fulfill)) - } else { - warn!("Got Fulfill packet that does not match an outgoing Prepare we sent: {:?}", fulfill); - Ok(()) - } - } - Ok((request_id, Packet::Reject(reject))) => { - trace!("Got reject response to request id {}", request_id); - if let Some(channel) = (*pending_requests.lock()).remove(&request_id) { - channel.send(Err(reject)).map_err(|reject| error!("Error forwarding Reject packet back to the Future that sent the Prepare: {:?}", reject)) - } else { - warn!("Got Reject packet that does not match an outgoing Prepare we sent: {:?}", reject); - Ok(()) - } - }, - Err(_) => { - debug!("Unable to parse ILP packet from BTP packet (if this is the first time this appears, the packet was probably the auth response)"); - // TODO Send error back - Ok(()) - } - } - } else if message.is_ping() { - trace!("Responding to Ping message from account {}", account.id()); - tx_clone.unbounded_send(Message::Pong(Vec::new())).map_err(|err| error!("Error sending Pong message back: {:?}", err)) - } else { - Ok(()) - } - }).then(move |result| { - debug!("Finished reading from WebSocket stream for account: {}", account_id); - result + // Close connections trigger + let read = valve.wrap(read); // close when `write_to_ws` calls `drop(connection)` + let read = self.stream_valve.wrap(read); + let read_from_ws = read.for_each(handle_message_fn).then(move |_| { + async move { + debug!( + "Finished reading from WebSocket stream for account: {}", + account_id + ); + Ok::<(), ()>(()) + } }); + tokio::spawn(read_from_ws); - let connections = self.connections.clone(); - let keep_connections_open = self.close_all_connections.clone(); - let handle_connection = handle_incoming - .select(forward_to_connection) - .then(move |_| { - let _ = keep_connections_open; - let mut connections = connections.write(); - connections.remove(&account_id); - debug!( - "WebSocket connection closed for account {} ({} connections still open)", - account_id, - connections.len() + // Send pings every PING_INTERVAL until the connection closes (when `drop(close_connection)` is called) + // or the Service is dropped (which will implicitly drop `close_all_connections`, closing the stream_valve) + let tx_clone = client_tx.clone(); + let ping_interval = time::interval(Duration::from_secs(PING_INTERVAL)); + let repeat_until_service_drops = self.stream_valve.wrap(ping_interval); + let send_pings = valve.wrap(repeat_until_service_drops).for_each(move |_| { + // For each tick send a 
ping + if let Err(err) = tx_clone.unbounded_send(PING.clone()) { + warn!( + "Error sending Ping on connection to account {}: {:?}", + account_id, err ); - Ok(()) - }); - spawn(handle_connection); + } + future::ready(()) + }); + tokio::spawn(send_pings); // Save the sender side of the channel so we have a way to forward outgoing requests to the WebSocket - self.connections.write().insert(account_id, tx); + self.connections.write().insert(account_id, client_tx); } /// Convert this BtpOutgoingService into a bidirectional BtpService by adding a handler for incoming requests. /// This will automatically pull all incoming Prepare packets from the channel buffer and call the IncomingService with them. - pub fn handle_incoming(self, incoming_handler: I) -> BtpService + pub async fn handle_incoming(self, incoming_handler: I) -> BtpService where I: IncomingService + Clone + Send + 'static, { @@ -234,14 +235,14 @@ where // the incoming Prepare packets they get in self.pending_incoming // Now that we're adding an incoming handler, this will spawn a task to read // all Prepare packets from the buffer, handle them, and send the responses back - let mut incoming_handler_clone = incoming_handler.clone(); let connections_clone = self.connections.clone(); - let handle_pending_incoming = self + let mut handle_pending_incoming = self .pending_incoming .lock() .take() - .expect("handle_incoming can only be called once") - .for_each(move |(account, request_id, prepare)| { + .expect("handle_incoming can only be called once"); + let handle_pending_incoming_fut = async move { + while let Some((account, request_id, prepare)) = handle_pending_incoming.next().await { let account_id = account.id(); let connections_clone = connections_clone.clone(); let request = IncomingRequest { @@ -254,37 +255,33 @@ where request.from.username(), request.from.id() ); - incoming_handler_clone - .handle_request(request) - .then(move |result| { - let packet = match result { - Ok(fulfill) => Packet::Fulfill(fulfill), - Err(reject) => Packet::Reject(reject), - }; - if let Some(connection) = connections_clone - .read() - .get(&account_id) { - let message = ilp_packet_to_ws_message(request_id, packet); - connection - .clone() - .unbounded_send(message) - .map_err(move |err| { - error!( - "Error sending response to account: {} {:?}", - account_id, err - ) - }) - } else { - error!("Error sending response to account: {}, connection was closed. {:?}", account_id, packet); - Err(()) - } - }) - }) - .then(move |_| { - trace!("Finished reading from pending_incoming buffer"); - Ok(()) - }); - spawn(handle_pending_incoming); + let mut handler = incoming_handler.clone(); + let packet = match handler.handle_request(request).await { + Ok(fulfill) => Packet::Fulfill(fulfill), + Err(reject) => Packet::Reject(reject), + }; + + if let Some(connection) = connections_clone.clone().read().get(&account_id) { + let message = ilp_packet_to_ws_message(request_id, packet); + let _ = connection.unbounded_send(message).map_err(move |err| { + error!( + "Error sending response to account: {} {:?}", + account_id, err + ) + }); + } else { + error!( + "Error sending response to account: {}, connection was closed. 
{:?}", + account_id, packet + ); + } + } + + trace!("Finished reading from pending_incoming buffer"); + Ok::<(), ()>(()) + }; + + tokio::spawn(handle_pending_incoming_fut); BtpService { outgoing: self, @@ -293,20 +290,20 @@ where } } +#[async_trait] impl OutgoingService for BtpOutgoingService where - O: OutgoingService + Clone, - A: BtpAccount + 'static, + O: OutgoingService + Send + Sync + Clone + 'static, + A: BtpAccount + Send + Sync + Clone + 'static, { - type Future = BoxedIlpFuture; - /// Send an outgoing request to one of the open connections. /// /// If there is no open connection for the Account specified in `request.to`, the /// request will be passed through to the `next` handler. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let account_id = request.to.id(); - if let Some(connection) = (*self.connections.read()).get(&account_id) { + let connections = self.connections.read().clone(); // have to clone here to avoid await errors + if let Some(connection) = connections.get(&account_id) { let request_id = random::(); let ilp_address = self.ilp_address.clone(); @@ -315,11 +312,14 @@ where let keep_connections_open = self.close_all_connections.clone(); trace!( - "Sending outgoing request {} to account {}", + "Sending outgoing request {} to {} ({})", request_id, + request.to.username(), account_id ); + // Connection is an unbounded sender which sends to the rx that + // forwards to the sink which sends the data over match connection.unbounded_send(ilp_packet_to_ws_message( request_id, Packet::Prepare(request.prepare), @@ -327,59 +327,53 @@ where Ok(_) => { let (sender, receiver) = oneshot::channel(); (*self.pending_outgoing.lock()).insert(request_id, sender); - Box::new( - receiver - .then(move |result| { - // Drop the trigger here since we've gotten the response - // and don't need to keep the connections open if this was the - // last thing we were waiting for - let _ = keep_connections_open; - result - }) - .map_err(move |err| { - error!( - "Sending request {} to account {} failed: {:?}", - request_id, account_id, err - ); - RejectBuilder { - code: ErrorCode::T00_INTERNAL_ERROR, - message: &[], - triggered_by: Some(&ilp_address), - data: &[], - } - .build() - }) - .and_then(|result| match result { - Ok(fulfill) => Ok(fulfill), - Err(reject) => Err(reject), - }), - ) + let result = receiver.await; + // Drop the trigger here since we've gotten the response + // and don't need to keep the connections open if this was the + // last thing we were waiting for + let _ = keep_connections_open; + match result { + // This can be either a reject or a fulfill packet + Ok(packet) => packet, + Err(err) => { + error!( + "Sending request {} to account {} failed: {:?}", + request_id, account_id, err + ); + Err(RejectBuilder { + code: ErrorCode::T00_INTERNAL_ERROR, + message: &[], + triggered_by: Some(&ilp_address), + data: &[], + } + .build()) + } + } } Err(send_error) => { error!( "Error sending websocket message for request {} to account {}: {:?}", request_id, account_id, send_error ); - let reject = RejectBuilder { + Err(RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: &[], triggered_by: Some(&ilp_address), data: &[], } - .build(); - Box::new(err(reject)) + .build()) } } - } else if request.to.get_ilp_over_btp_url().is_some() - || request.to.get_ilp_over_btp_outgoing_token().is_some() - { - trace!( - "No open connection for account: {}, forwarding request to the next service", - 
request.to.id() - ); - Box::new(self.next.send_request(request)) } else { - Box::new(self.next.send_request(request)) + if request.to.get_ilp_over_btp_url().is_some() + || request.to.get_ilp_over_btp_outgoing_token().is_some() + { + trace!( + "No open connection for account: {}, forwarding request to the next service", + request.to.username() + ); + } + self.next.send_request(request).await } } } @@ -394,7 +388,7 @@ impl BtpService where I: IncomingService + Clone + Send + 'static, O: OutgoingService + Clone, - A: BtpAccount + 'static, + A: BtpAccount + Send + Sync + 'static, { /// Close all of the open WebSocket connections pub fn close(&self) { @@ -402,19 +396,19 @@ where } } +#[async_trait] impl OutgoingService for BtpService where - O: OutgoingService + Clone + Send + 'static, - A: BtpAccount + 'static, + I: Send, // This is a async/await requirement + O: OutgoingService + Send + Sync + Clone + 'static, + A: BtpAccount + Send + Sync + Clone + 'static, { - type Future = BoxedIlpFuture; - /// Send an outgoing request to one of the open connections. /// /// If there is no open connection for the Account specified in `request.to`, the /// request will be passed through to the `next` handler. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { - self.outgoing.send_request(request) + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { + self.outgoing.send_request(request).await } } @@ -460,42 +454,31 @@ fn parse_ilp_packet(message: Message) -> Result<(u32, Packet), ()> { } fn ilp_packet_to_ws_message(request_id: u32, packet: Packet) -> Message { - match packet { - Packet::Prepare(prepare) => { - let data = BytesMut::from(prepare).to_vec(); - let btp_packet = BtpMessage { - request_id, - protocol_data: vec![ProtocolData { - protocol_name: "ilp".to_string(), - content_type: ContentType::ApplicationOctetStream, - data, - }], - }; - Message::binary(btp_packet.to_bytes()) + let (data, is_response) = match packet { + Packet::Prepare(prepare) => (BytesMut::from(prepare).to_vec(), false), + Packet::Fulfill(fulfill) => (BytesMut::from(fulfill).to_vec(), true), + Packet::Reject(reject) => (BytesMut::from(reject).to_vec(), true), + }; + let btp_packet = if is_response { + BtpMessage { + request_id, + protocol_data: vec![ProtocolData { + protocol_name: "ilp".to_string(), + content_type: ContentType::ApplicationOctetStream, + data, + }], } - Packet::Fulfill(fulfill) => { - let data = BytesMut::from(fulfill).to_vec(); - let btp_packet = BtpResponse { - request_id, - protocol_data: vec![ProtocolData { - protocol_name: "ilp".to_string(), - content_type: ContentType::ApplicationOctetStream, - data, - }], - }; - Message::binary(btp_packet.to_bytes()) - } - Packet::Reject(reject) => { - let data = BytesMut::from(reject).to_vec(); - let btp_packet = BtpResponse { - request_id, - protocol_data: vec![ProtocolData { - protocol_name: "ilp".to_string(), - content_type: ContentType::ApplicationOctetStream, - data, - }], - }; - Message::binary(btp_packet.to_bytes()) + .to_bytes() + } else { + BtpResponse { + request_id, + protocol_data: vec![ProtocolData { + protocol_name: "ilp".to_string(), + content_type: ContentType::ApplicationOctetStream, + data, + }], } - } + .to_bytes() + }; + Message::binary(btp_packet) } diff --git a/crates/interledger-btp/src/wrapped_ws.rs b/crates/interledger-btp/src/wrapped_ws.rs new file mode 100644 index 000000000..230bc7417 --- /dev/null +++ b/crates/interledger-btp/src/wrapped_ws.rs @@ -0,0 +1,92 @@ +use futures::stream::Stream; +use 
futures::Sink; +use log::warn; +use pin_project::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; +use warp::ws::Message; + +/// Wrapper struct to unify the Tungstenite WebSocket connection from connect_async +/// with the Warp websocket connection from ws.upgrade. Stream and Sink are re-implemented +/// for this struct, normalizing it to use Tungstenite's messages and a wrapped error type +#[pin_project] +#[derive(Clone)] +pub(crate) struct WsWrap { + #[pin] + pub(crate) connection: W, +} + +impl Stream for WsWrap +where + W: Stream, +{ + type Item = tungstenite::Message; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + match this.connection.poll_next(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(val) => match val { + Some(v) => { + let v = convert_msg(v); + Poll::Ready(Some(v)) + } + None => Poll::Ready(None), + }, + } + } +} + +impl Sink for WsWrap +where + W: Sink, +{ + type Error = W::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.connection.poll_ready(cx) + } + + fn start_send(self: Pin<&mut Self>, item: tungstenite::Message) -> Result<(), Self::Error> { + let this = self.project(); + let item = match item { + tungstenite::Message::Binary(data) => Message::binary(data), + tungstenite::Message::Text(data) => Message::text(data), + // Ignore other message types because warp's WebSocket type doesn't + // allow us to send any other types of messages + // TODO make sure warp's websocket responds to pings and/or sends them to keep the + // connection alive + _ => return Ok(()), + }; + this.connection.start_send(item) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.connection.poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.connection.poll_close(cx) + } +} + +fn convert_msg(message: Message) -> tungstenite::Message { + if message.is_ping() { + tungstenite::Message::Ping(message.into_bytes()) + } else if message.is_binary() { + tungstenite::Message::Binary(message.into_bytes()) + } else if message.is_text() { + tungstenite::Message::Text(message.to_str().unwrap_or_default().to_string()) + } else if message.is_close() { + tungstenite::Message::Close(None) + } else { + warn!( + "Got unexpected websocket message, closing connection: {:?}", + message + ); + tungstenite::Message::Close(None) + } +} diff --git a/crates/interledger-ccp/Cargo.toml b/crates/interledger-ccp/Cargo.toml index 203030e2c..608cec3da 100644 --- a/crates/interledger-ccp/Cargo.toml +++ b/crates/interledger-ccp/Cargo.toml @@ -10,7 +10,7 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] bytes = { version = "0.4.12", default-features = false } byteorder = { version = "1.3.2", default-features = false } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3", default-features = false } hex = { version = "0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } @@ -18,7 +18,7 @@ lazy_static = { version = "1.4.0", default-features = false } log = { version = "0.4.8", default-features = false } parking_lot = { version = "0.9.0", default-features = false } ring = { version = "0.16.9", default-features = 
false } -tokio-executor = { version = "0.1.8", default-features = false } -tokio-timer = { version = "0.2.11", default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"]} serde = { version = "1.0.101", default-features = false, features = ["derive"] } +async-trait = "0.1.22" +tokio = { version = "0.2.6", features = ["time", "rt-core", "macros"] } \ No newline at end of file diff --git a/crates/interledger-ccp/src/lib.rs b/crates/interledger-ccp/src/lib.rs index 0eece31c6..a10fa2131 100644 --- a/crates/interledger-ccp/src/lib.rs +++ b/crates/interledger-ccp/src/lib.rs @@ -9,7 +9,7 @@ //! updates are used by the `Router` to forward incoming packets to the best next hop //! we know about. -use futures::Future; +use async_trait::async_trait; use interledger_service::Account; use std::collections::HashMap; use std::{fmt, str::FromStr}; @@ -30,7 +30,7 @@ use serde::{Deserialize, Serialize}; /// Data structure used to describe the routing relation of an account with its peers. #[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Serialize, Deserialize, Ord, Eq)] pub enum RoutingRelation { /// An account from which we do not receive routes from, neither broadcast /// routes to @@ -98,25 +98,30 @@ pub trait CcpRoutingAccount: Account { type Routes = HashMap; type LocalAndConfiguredRoutes = (Routes, Routes); +/// Store trait for managing the routes broadcast and set over Connector to Connector protocol +#[async_trait] pub trait RouteManagerStore: Clone { type Account: CcpRoutingAccount; // TODO should we have a way to only get the details for specific routes? - fn get_local_and_configured_routes( + /// Gets the local and manually configured routes + async fn get_local_and_configured_routes( &self, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; - fn get_accounts_to_send_routes_to( + /// Gets all accounts which the node should send routes to (Peer and Child accounts) + /// The caller can also pass a vector of account ids to be ignored + async fn get_accounts_to_send_routes_to( &self, ignore_accounts: Vec, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; - fn get_accounts_to_receive_routes_from( - &self, - ) -> Box, Error = ()> + Send>; + /// Gets all accounts which the node should receive routes to (Peer and Parent accounts) + async fn get_accounts_to_receive_routes_from(&self) -> Result, ()>; - fn set_routes( + /// Sets the new routes to the store (prefix -> account) + async fn set_routes( &mut self, - routes: impl IntoIterator, - ) -> Box + Send>; + routes: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()>; } diff --git a/crates/interledger-ccp/src/packet.rs b/crates/interledger-ccp/src/packet.rs index 9162a1675..641681cf3 100644 --- a/crates/interledger-ccp/src/packet.rs +++ b/crates/interledger-ccp/src/packet.rs @@ -38,6 +38,9 @@ lazy_static! { Address::from_str("peer.route.update").unwrap(); } +/// CCP Packet mode used in Route Control Requests of the CCP protocol. +/// Idle: Account does not wish to receive more routes +/// Sync: Account wishes to receive routes #[derive(Clone, Copy, PartialEq, Debug)] #[repr(u8)] pub enum Mode { @@ -60,6 +63,9 @@ impl TryFrom for Mode { } } +/// A request that ask the receiver node to transition to Idle or Sync mode. +/// If the mode is Idle, the receiver of the request will stop broadcasting routes to the sender. +/// If the mode is Sync, the receiver will start broadcasting routes to that account. 
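A hedged sketch of how these CCP types fit together, reusing the field names that appear later in `send_route_control_request` (`last_known_routing_table_id`, `last_known_epoch`); the `features` field and its empty value are assumptions here, not taken from this patch:

    // Ask a peer to start broadcasting routes to us (Sync mode) and serialize
    // the request into an ILP Prepare via the From impl added further down.
    let control = RouteControlRequest {
        mode: Mode::Sync,
        last_known_routing_table_id: [0; 16],
        last_known_epoch: 0,
        features: Vec::new(), // assumed field for this sketch
    };
    let prepare: Prepare = control.into();
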
#[derive(Clone, PartialEq)] pub struct RouteControlRequest { pub mode: Mode, @@ -153,8 +159,14 @@ impl RouteControlRequest { } } +impl From for Prepare { + fn from(request: RouteControlRequest) -> Self { + request.to_prepare() + } +} + #[derive(Clone, PartialEq, Debug)] -pub struct RouteProp { +pub(crate) struct RouteProp { pub(crate) is_optional: bool, pub(crate) is_transitive: bool, pub(crate) is_partial: bool, @@ -215,7 +227,7 @@ impl RouteProp { } #[derive(Clone, PartialEq)] -pub struct Route { +pub(crate) struct Route { // TODO switch this to use the Address type so we don't need separate parsing logic when implementing Debug pub(crate) prefix: String, pub(crate) path: Vec, @@ -401,6 +413,12 @@ impl RouteUpdateRequest { } } +impl From for Prepare { + fn from(request: RouteUpdateRequest) -> Self { + request.to_prepare() + } +} + #[cfg(test)] mod route_control_request { use super::*; diff --git a/crates/interledger-ccp/src/routing_table.rs b/crates/interledger-ccp/src/routing_table.rs index 5274db747..5cd8b2837 100644 --- a/crates/interledger-ccp/src/routing_table.rs +++ b/crates/interledger-ccp/src/routing_table.rs @@ -10,7 +10,7 @@ lazy_static! { static ref RANDOM: SystemRandom = SystemRandom::new(); } -#[derive(Debug)] +#[derive(Debug, Clone)] struct PrefixMap { map: HashMap, } @@ -44,7 +44,7 @@ impl PrefixMap { /// When an Interledger node reloads, it will generate a new UUID for its routing table. /// Each update applied increments the epoch number, so it acts as a version tracker. /// This helps peers make sure they are in sync with one another and request updates if not. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct RoutingTable { id: [u8; 16], epoch: u32, @@ -55,7 +55,7 @@ impl RoutingTable where A: Clone, { - pub fn new(id: [u8; 16]) -> Self { + pub(crate) fn new(id: [u8; 16]) -> Self { RoutingTable { id, epoch: 0, @@ -64,63 +64,63 @@ where } #[cfg(test)] - pub fn set_id(&mut self, id: [u8; 16]) { + pub(crate) fn set_id(&mut self, id: [u8; 16]) { self.id = id; self.epoch = 0; } #[cfg(test)] - pub fn set_epoch(&mut self, epoch: u32) { + pub(crate) fn set_epoch(&mut self, epoch: u32) { self.epoch = epoch; } - pub fn id(&self) -> [u8; 16] { + pub(crate) fn id(&self) -> [u8; 16] { self.id } - pub fn epoch(&self) -> u32 { + pub(crate) fn epoch(&self) -> u32 { self.epoch } - pub fn increment_epoch(&mut self) -> u32 { + pub(crate) fn increment_epoch(&mut self) -> u32 { let epoch = self.epoch; self.epoch += 1; epoch } /// Set a particular route, overwriting the one that was there before - pub fn set_route(&mut self, prefix: String, account: A, route: Route) { + pub(crate) fn set_route(&mut self, prefix: String, account: A, route: Route) { self.prefix_map.remove(&prefix); self.prefix_map.insert(prefix, (account, route)); } /// Remove the route for the given prefix. Returns true if that route existed before - pub fn delete_route(&mut self, prefix: &str) -> bool { + pub(crate) fn delete_route(&mut self, prefix: &str) -> bool { self.prefix_map.remove(prefix) } /// Add the given route. 
Returns true if that routed did not already exist - pub fn add_route(&mut self, account: A, route: Route) -> bool { + pub(crate) fn add_route(&mut self, account: A, route: Route) -> bool { self.prefix_map .insert(route.prefix.clone(), (account, route)) } /// Get the best route we have for the given prefix - pub fn get_route(&self, prefix: &str) -> Option<&(A, Route)> { + pub(crate) fn get_route(&self, prefix: &str) -> Option<&(A, Route)> { self.prefix_map.resolve(prefix) } - pub fn get_simplified_table(&self) -> HashMap<&str, A> { + pub(crate) fn get_simplified_table(&self) -> HashMap { HashMap::from_iter( self.prefix_map .map .iter() - .map(|(address, (account, _route))| (address.as_str(), account.clone())), + .map(|(address, (account, _route))| (address.clone(), account.clone())), ) } /// Handle a CCP Route Update Request from the peer this table represents - pub fn handle_update_request( + pub(crate) fn handle_update_request( &mut self, account: A, request: RouteUpdateRequest, diff --git a/crates/interledger-ccp/src/server.rs b/crates/interledger-ccp/src/server.rs index 718e557ce..0718a342c 100644 --- a/crates/interledger-ccp/src/server.rs +++ b/crates/interledger-ccp/src/server.rs @@ -1,5 +1,3 @@ -#[cfg(test)] -use crate::packet::PEER_PROTOCOL_CONDITION; use crate::{ packet::{ Mode, Route, RouteControlRequest, RouteUpdateRequest, CCP_CONTROL_DESTINATION, @@ -8,22 +6,17 @@ use crate::{ routing_table::RoutingTable, CcpRoutingAccount, RouteManagerStore, RoutingRelation, }; -use futures::{ - future::{err, join_all, ok, Either}, - Future, Stream, -}; -#[cfg(test)] -use interledger_packet::PrepareBuilder; -use interledger_packet::{Address, ErrorCode, Fulfill, Reject, RejectBuilder}; +use async_trait::async_trait; +use futures::future::join_all; +use interledger_packet::{Address, ErrorCode, RejectBuilder}; use interledger_service::{ - Account, AddressStore, BoxedIlpFuture, IncomingRequest, IncomingService, OutgoingRequest, + Account, AddressStore, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, }; -#[cfg(test)] -use lazy_static::lazy_static; use log::{debug, error, trace, warn}; use parking_lot::{Mutex, RwLock}; use ring::digest::{digest, SHA256}; +use std::cmp::Ordering as StdOrdering; use std::collections::HashMap; use std::{ cmp::min, @@ -33,13 +26,16 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Duration, }; -use tokio_timer::Interval; use uuid::Uuid; -#[cfg(not(test))] -use tokio_executor::spawn; +#[cfg(test)] +use crate::packet::PEER_PROTOCOL_CONDITION; +#[cfg(test)] +use futures::TryFutureExt; +#[cfg(test)] +use lazy_static::lazy_static; // TODO should the route expiry be longer? we use 30 seconds now // because the expiry shortener will lower the expiry to 30 seconds @@ -116,7 +112,13 @@ where #[cfg(not(test))] { - spawn(service.start_broadcast_interval(self.broadcast_interval)); + let broadcast_interval = self.broadcast_interval; + let service_clone = service.clone(); + tokio::spawn(async move { + service_clone + .start_broadcast_interval(broadcast_interval) + .await + }); } service @@ -180,20 +182,17 @@ where A: CcpRoutingAccount + Send + Sync + 'static, { /// Returns a future that will trigger this service to update its routes and broadcast - /// updates to peers on the given interval. 
- pub fn start_broadcast_interval(&self, interval: u64) -> impl Future { - let clone = self.clone(); - self.request_all_routes().and_then(move |_| { - Interval::new(Instant::now(), Duration::from_millis(interval)) - .map_err(|err| error!("Interval error, no longer sending route updates: {:?}", err)) - .for_each(move |_| { - // ensure we have the latest ILP Address from the store - clone.update_ilp_address(); - // Returning an error would end the broadcast loop - // so we want to return Ok even if there was an error - clone.broadcast_routes().then(|_| Ok(())) - }) - }) + /// updates to peers on the given interval. `interval` is in milliseconds + pub async fn start_broadcast_interval(&self, interval: u64) -> Result<(), ()> { + self.request_all_routes().await?; + let mut interval = tokio::time::interval(Duration::from_millis(interval)); + loop { + interval.tick().await; + // ensure we have the latest ILP Address from the store + self.update_ilp_address(); + // Do not consume the result if an error since we want to keep the loop going + let _ = self.broadcast_routes().await; + } } fn update_ilp_address(&self) { @@ -210,52 +209,47 @@ where } } - pub fn broadcast_routes(&self) -> impl Future { - let clone = self.clone(); - self.update_best_routes(None) - .and_then(move |_| clone.send_route_updates()) + pub async fn broadcast_routes(&self) -> Result<(), ()> { + self.update_best_routes(None).await?; + self.send_route_updates().await } /// Request routes from all the peers we are willing to receive routes from. /// This is mostly intended for when the CCP server starts up and doesn't have any routes from peers. - fn request_all_routes(&self) -> impl Future { - let clone = self.clone(); - self.store - .get_accounts_to_receive_routes_from() - .then(|result| { - let accounts = result.unwrap_or_else(|_| Vec::new()); - join_all(accounts.into_iter().map(move |account| { - clone.send_route_control_request(account, DUMMY_ROUTING_TABLE_ID, 0) - })) - }) - .then(|_| Ok(())) + async fn request_all_routes(&self) -> Result<(), ()> { + let result = self.store.get_accounts_to_receive_routes_from().await; + let accounts = result.unwrap_or_else(|_| Vec::new()); + join_all( + accounts + .into_iter() + .map(|account| self.send_route_control_request(account, DUMMY_ROUTING_TABLE_ID, 0)), + ) + .await; + Ok(()) } /// Handle a CCP Route Control Request. If this is from an account that we broadcast routes to, /// we'll send an outgoing Route Update Request to them. 
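Because `start_broadcast_interval` above is now an async fn that drives an endless interval loop, callers are expected to spawn it rather than await it inline; a hedged sketch mirroring the non-test setup earlier in this patch (`service` and `broadcast_interval` are placeholders):

    let service_clone = service.clone();
    tokio::spawn(async move {
        // Requests routes from peers once, then broadcasts every `broadcast_interval` ms.
        service_clone.start_broadcast_interval(broadcast_interval).await
    });
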
- fn handle_route_control_request( - &self, - request: IncomingRequest, - ) -> impl Future { + async fn handle_route_control_request(&self, request: IncomingRequest) -> IlpResult { if !request.from.should_send_routes() { - return Either::A(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"We are not configured to send routes to you, sorry", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let control = RouteControlRequest::try_from(&request.prepare); if control.is_err() { - return Either::A(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Invalid route control request", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let control = control.unwrap(); debug!( @@ -295,40 +289,38 @@ where #[cfg(test)] { let ilp_address = self.ilp_address.read().clone(); - return Either::B(Either::A( - self.send_route_update(request.from.clone(), from_epoch_index, to_epoch_index) - .map_err(move |_| { - RejectBuilder { - code: ErrorCode::T01_PEER_UNREACHABLE, - message: b"Error sending route update request", - data: &[], - triggered_by: Some(&ilp_address), - } - .build() - }) - .and_then(|_| Ok(CCP_RESPONSE.clone())), - )); + return self + .send_route_update(request.from.clone(), from_epoch_index, to_epoch_index) + .map_err(move |_| { + RejectBuilder { + code: ErrorCode::T01_PEER_UNREACHABLE, + message: b"Error sending route update request", + data: &[], + triggered_by: Some(&ilp_address), + } + .build() + }) + .map_ok(|_| Ok(CCP_RESPONSE.clone())) + .await?; } #[cfg(not(test))] { - spawn(self.send_route_update( - request.from.clone(), - from_epoch_index, - to_epoch_index, - )); + tokio::spawn({ + let self_clone = self.clone(); + async move { + self_clone + .send_route_update( + request.from.clone(), + from_epoch_index, + to_epoch_index, + ) + .await + } + }); } } - - #[cfg(not(test))] - { - Either::B(ok(CCP_RESPONSE.clone())) - } - - #[cfg(test)] - { - Either::B(Either::B(ok(CCP_RESPONSE.clone()))) - } + Ok(CCP_RESPONSE.clone()) } /// Remove invalid routes before processing the Route Update Request @@ -367,27 +359,27 @@ where /// If updates are applied to the Incoming Routing Table for this peer, we will /// then check whether those routes are better than the current best ones we have in the /// Local Routing Table. 
- fn handle_route_update_request(&self, request: IncomingRequest) -> BoxedIlpFuture { + async fn handle_route_update_request(&self, request: IncomingRequest) -> IlpResult { // Ignore the request if we don't accept routes from them if !request.from.should_receive_routes() { - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Your route broadcasts are not accepted here", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let update = RouteUpdateRequest::try_from(&request.prepare); if update.is_err() { - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Invalid route update request", triggered_by: Some(&self.ilp_address.read()), data: &[], } - .build())); + .build()); } let update = update.unwrap(); debug!( @@ -399,62 +391,60 @@ where // Filter out routes that don't make sense or that we won't accept let update = self.filter_routes(update); - let mut incoming_tables = self.incoming_tables.write(); - if !&incoming_tables.contains_key(&request.from.id()) { - incoming_tables.insert( - request.from.id(), - RoutingTable::new(update.routing_table_id), - ); - } + // Ensure the mutex gets dropped before the async block + let result = { + let mut incoming_tables = self.incoming_tables.write(); + if !&incoming_tables.contains_key(&request.from.id()) { + incoming_tables.insert( + request.from.id(), + RoutingTable::new(update.routing_table_id), + ); + } + incoming_tables + .get_mut(&request.from.id()) + .expect("Should have inserted a routing table for this account") + .handle_update_request(request.from.clone(), update) + }; // Update the routing table we maintain for the account we got this from. // Figure out whether we need to update our routes for any of the prefixes // that were included in this route update. - match (*incoming_tables) - .get_mut(&request.from.id()) - .expect("Should have inserted a routing table for this account") - .handle_update_request(request.from.clone(), update) - { + match result { Ok(prefixes_updated) => { if prefixes_updated.is_empty() { trace!("Route update request did not contain any prefixes we need to update our routes for"); - return Box::new(ok(CCP_RESPONSE.clone())); + return Ok(CCP_RESPONSE.clone()); } debug!( "Recalculating best routes for prefixes: {}", prefixes_updated.join(", ") ); - let future = self.update_best_routes(Some( - prefixes_updated - .into_iter() - .map(|s| s.to_string()) - .collect(), - )); #[cfg(not(test))] { - spawn(future); - Box::new(ok(CCP_RESPONSE.clone())) + tokio::spawn({ + let self_clone = self.clone(); + async move { self_clone.update_best_routes(Some(prefixes_updated)).await } + }); } #[cfg(test)] { let ilp_address = self.ilp_address.clone(); - Box::new( - future - .map_err(move |_| { - RejectBuilder { - code: ErrorCode::T00_INTERNAL_ERROR, - message: b"Error processing route update", - data: &[], - triggered_by: Some(&ilp_address.read()), - } - .build() - }) - .and_then(|_| Ok(CCP_RESPONSE.clone())), - ) + self.update_best_routes(Some(prefixes_updated)) + .map_err(move |_| { + RejectBuilder { + code: ErrorCode::T00_INTERNAL_ERROR, + message: b"Error processing route update", + data: &[], + triggered_by: Some(&ilp_address.read()), + } + .build() + }) + .await?; } + Ok(CCP_RESPONSE.clone()) } Err(message) => { warn!("Error handling incoming Route Update request, sending a Route Control request to get updated routing table info from peer. 
Error was: {}", &message); @@ -465,31 +455,41 @@ where triggered_by: Some(&self.ilp_address.read()), } .build(); - let table = &incoming_tables[&request.from.id()]; - let future = self.send_route_control_request( - request.from.clone(), - table.id(), - table.epoch(), - ); + + let table = &self.incoming_tables.read().clone()[&request.from.id()]; + #[cfg(not(test))] - { - spawn(future); - Box::new(err(reject)) - } + tokio::spawn({ + let table = table.clone(); + let self_clone = self.clone(); + async move { + let _ = self_clone + .send_route_control_request( + request.from.clone(), + table.id(), + table.epoch(), + ) + .await; + } + }); + #[cfg(test)] - Box::new(future.then(move |_| Err(reject))) + let _ = self + .send_route_control_request(request.from.clone(), table.id(), table.epoch()) + .await; + Err(reject) } } } /// Request a Route Update from the specified peer. This is sent when we get /// a Route Update Request from them with a gap in the epochs since the last one we saw. - fn send_route_control_request( + async fn send_route_control_request( &self, account: A, last_known_routing_table_id: [u8; 16], last_known_epoch: u32, - ) -> impl Future { + ) -> Result<(), ()> { let account_id = account.id(); let control = RouteControlRequest { mode: Mode::Sync, @@ -503,7 +503,8 @@ where hex::encode(&last_known_routing_table_id[..]), last_known_epoch); let prepare = control.to_prepare(); - self.clone() + let result = self + .clone() .outgoing .send_request(OutgoingRequest { // TODO If we start charging or paying for CCP broadcasts we'll need to @@ -514,15 +515,15 @@ where original_amount: prepare.amount(), prepare, }) - .then(move |result| { - if let Err(err) = result { - warn!( - "Error sending Route Control Request to account {}: {:?}", - account_id, err - ) - } - Ok(()) - }) + .await; + + if let Err(err) = result { + warn!( + "Error sending Route Control Request to account {}: {:?}", + account_id, err + ) + } + Ok(()) } /// Check whether the Local Routing Table currently has the best routes for the @@ -530,10 +531,7 @@ where /// with some new or modified routes that might be better than our existing ones. /// /// If prefixes is None, this will check the best routes for all local and configured prefixes. 
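The `.then(...)` tail from futures 0.1, which logged the error and then swallowed it, becomes an explicit await followed by a check of the Result, as in send_route_control_request above. A small sketch of that conversion, assuming a stand-in async send_request rather than the crate's service trait:

// futures 0.1 (before):
//     send_request(prepare)
//         .then(move |result| {
//             if let Err(err) = result { warn!("..."); }
//             Ok(())
//         })
async fn send_request(prepare: &str) -> Result<(), String> {
    if prepare.is_empty() {
        Err("empty packet".to_string())
    } else {
        Ok(())
    }
}

// async/await (after): await first, then inspect the Result, still returning
// Ok(()) so a failed send does not fail the caller
async fn send_and_log(prepare: &str) -> Result<(), ()> {
    if let Err(err) = send_request(prepare).await {
        eprintln!("Error sending request: {:?}", err);
    }
    Ok(())
}

#[tokio::main]
async fn main() {
    send_and_log("").await.unwrap();
}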
- fn update_best_routes( - &self, - prefixes: Option>, - ) -> impl Future + 'static { + async fn update_best_routes(&self, prefixes: Option>) -> Result<(), ()> { let local_table = self.local_table.clone(); let forwarding_table = self.forwarding_table.clone(); let forwarding_table_updates = self.forwarding_table_updates.clone(); @@ -541,140 +539,138 @@ where let ilp_address = self.ilp_address.read().clone(); let mut store = self.store.clone(); - self.store.get_local_and_configured_routes().and_then( - move |(ref local_routes, ref configured_routes)| { - let (better_routes, withdrawn_routes) = { - // Note we only use a read lock here and later get a write lock if we need to update the table - let local_table = local_table.read(); - let incoming_tables = incoming_tables.read(); - - // Either check the given prefixes or check all of our local and configured routes - let prefixes_to_check: Box> = - if let Some(ref prefixes) = prefixes { - Box::new(prefixes.iter().map(|prefix| prefix.as_str())) - } else { - let routes = configured_routes.iter().chain(local_routes.iter()); - Box::new(routes.map(|(prefix, _account)| prefix.as_str())) - }; - - // Check all the prefixes to see which ones we have different routes for - // and which ones we don't have routes for anymore - let mut better_routes: Vec<(&str, A, Route)> = - Vec::with_capacity(prefixes_to_check.size_hint().0); - let mut withdrawn_routes: Vec<&str> = Vec::new(); - for prefix in prefixes_to_check { - // See which prefixes there is now a better route for - if let Some((best_next_account, best_route)) = get_best_route_for_prefix( - local_routes, - configured_routes, - &incoming_tables, - prefix, - ) { - if let Some((ref next_account, ref _route)) = - local_table.get_route(prefix) - { - if next_account.id() == best_next_account.id() { - continue; - } else { - better_routes.push(( - prefix, - best_next_account.clone(), - best_route.clone(), - )); - } - } else { - better_routes.push((prefix, best_next_account, best_route)); - } - } else { - // No longer have a route to this prefix - withdrawn_routes.push(prefix); - } - } - (better_routes, withdrawn_routes) - }; + let (local_routes, configured_routes) = + self.store.get_local_and_configured_routes().await?; - // Update the local and forwarding tables - if !better_routes.is_empty() || !withdrawn_routes.is_empty() { - let mut local_table = local_table.write(); - let mut forwarding_table = forwarding_table.write(); - let mut forwarding_table_updates = forwarding_table_updates.write(); + // TODO: Should we extract this to a function and #[inline] it? 
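The scoping noted above matters because the parking_lot guards are synchronous: if one were still alive at an .await point, the future would no longer be Send and the lock would stay held while the task is parked. A minimal sketch of the scope-then-await pattern, using illustrative types:

use parking_lot::RwLock;
use std::sync::Arc;

async fn persist(routes: Vec<String>) -> Result<(), ()> {
    println!("persisting {} routes", routes.len());
    Ok(())
}

async fn update_routes(table: Arc<RwLock<Vec<String>>>) -> Result<(), ()> {
    // Do everything that needs the lock inside a block so the guard is
    // dropped before the await below; a guard held across an await point
    // would make this future !Send and keep the lock while the task is parked
    let snapshot = {
        let guard = table.read();
        (*guard).clone()
    };
    persist(snapshot).await
}

#[tokio::main]
async fn main() {
    let table = Arc::new(RwLock::new(vec!["example.a".to_string()]));
    update_routes(table).await.unwrap();
}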
+ let (better_routes, withdrawn_routes) = { + // Note we only use a read lock here and later get a write lock if we need to update the table + let local_table = local_table.read(); + let incoming_tables = incoming_tables.read(); - let mut new_routes: Vec = Vec::with_capacity(better_routes.len()); + // Either check the given prefixes or check all of our local and configured routes + let prefixes_to_check: Box> = + if let Some(ref prefixes) = prefixes { + Box::new(prefixes.iter().map(|prefix| prefix.as_str())) + } else { + let routes = configured_routes.iter().chain(local_routes.iter()); + Box::new(routes.map(|(prefix, _account)| prefix.as_str())) + }; - for (prefix, account, mut route) in better_routes { - debug!( - "Setting new route for prefix: {} -> Account: {} (id: {})", - prefix, - account.username(), - account.id(), - ); - local_table.set_route(prefix.to_string(), account.clone(), route.clone()); - - // Update the forwarding table - - // Don't advertise routes that don't start with the global prefix - // or that advertise the whole global prefix - let address_scheme = ilp_address.scheme(); - let correct_address_scheme = route.prefix.starts_with(address_scheme) - && route.prefix != address_scheme; - // We do want to advertise our address - let is_our_address = route.prefix == &ilp_address as &str; - // Don't advertise local routes because advertising only our address - // will be enough to ensure the packet gets to us and we can route it - // to the correct account on our node - let is_local_route = - route.prefix.starts_with(&ilp_address as &str) && route.path.is_empty(); - let not_local_route = is_our_address || !is_local_route; - // Don't include routes we're also withdrawing - let not_withdrawn_route = !withdrawn_routes.contains(&prefix); - - if correct_address_scheme && not_local_route && not_withdrawn_route { - let old_route = forwarding_table.get_route(prefix); - if old_route.is_none() || old_route.unwrap().0.id() != account.id() { - route.path.insert(0, ilp_address.to_string()); - // Each hop hashes the auth before forwarding - route.auth = hash(&route.auth); - forwarding_table.set_route( - prefix.to_string(), - account.clone(), - route.clone(), - ); - new_routes.push(route); - } + // Check all the prefixes to see which ones we have different routes for + // and which ones we don't have routes for anymore + let mut better_routes: Vec<(&str, A, Route)> = + Vec::with_capacity(prefixes_to_check.size_hint().0); + let mut withdrawn_routes: Vec<&str> = Vec::new(); + for prefix in prefixes_to_check { + // See which prefixes there is now a better route for + if let Some((best_next_account, best_route)) = get_best_route_for_prefix( + &local_routes, + &configured_routes, + &incoming_tables, + prefix, + ) { + if let Some((ref next_account, ref _route)) = local_table.get_route(prefix) { + if next_account.id() == best_next_account.id() { + continue; + } else { + better_routes.push(( + prefix, + best_next_account.clone(), + best_route.clone(), + )); } + } else { + better_routes.push((prefix, best_next_account, best_route)); } + } else { + // No longer have a route to this prefix + withdrawn_routes.push(prefix); + } + } + (better_routes, withdrawn_routes) + }; - for prefix in withdrawn_routes.iter() { - debug!("Removed route for prefix: {}", prefix); - local_table.delete_route(prefix); - forwarding_table.delete_route(prefix); + // Update the local and forwarding tables + if !better_routes.is_empty() || !withdrawn_routes.is_empty() { + let update_routes = { + let mut local_table = 
local_table.write(); + let mut forwarding_table = forwarding_table.write(); + let mut forwarding_table_updates = forwarding_table_updates.write(); + + let mut new_routes: Vec = Vec::with_capacity(better_routes.len()); + + for (prefix, account, mut route) in better_routes { + debug!( + "Setting new route for prefix: {} -> Account: {} (id: {})", + prefix, + account.username(), + account.id(), + ); + local_table.set_route(prefix.to_string(), account.clone(), route.clone()); + + // Update the forwarding table + + // Don't advertise routes that don't start with the global prefix + // or that advertise the whole global prefix + let address_scheme = ilp_address.scheme(); + let correct_address_scheme = + route.prefix.starts_with(address_scheme) && route.prefix != address_scheme; + // We do want to advertise our address + let is_our_address = route.prefix == &ilp_address as &str; + // Don't advertise local routes because advertising only our address + // will be enough to ensure the packet gets to us and we can route it + // to the correct account on our node + let is_local_route = + route.prefix.starts_with(&ilp_address as &str) && route.path.is_empty(); + let not_local_route = is_our_address || !is_local_route; + // Don't include routes we're also withdrawing + let not_withdrawn_route = !withdrawn_routes.contains(&prefix); + + if correct_address_scheme && not_local_route && not_withdrawn_route { + let old_route = forwarding_table.get_route(prefix); + if old_route.is_none() || old_route.unwrap().0.id() != account.id() { + route.path.insert(0, ilp_address.to_string()); + // Each hop hashes the auth before forwarding + route.auth = hash(&route.auth); + forwarding_table.set_route( + prefix.to_string(), + account.clone(), + route.clone(), + ); + new_routes.push(route); + } } + } - let epoch = forwarding_table.increment_epoch(); - forwarding_table_updates.push(( - new_routes, - withdrawn_routes.iter().map(|s| s.to_string()).collect(), - )); - debug_assert_eq!(epoch as usize + 1, forwarding_table_updates.len()); - - Either::A( - store.set_routes( - local_table - .get_simplified_table() - .into_iter() - .map(|(prefix, account)| (prefix.to_string(), account)), - ), - ) - } else { - // The routing table hasn't changed - Either::B(ok(())) + for prefix in withdrawn_routes.iter() { + debug!("Removed route for prefix: {}", prefix); + local_table.delete_route(prefix); + forwarding_table.delete_route(prefix); } - }, - ) + + let epoch = forwarding_table.increment_epoch(); + forwarding_table_updates.push(( + new_routes, + withdrawn_routes + .into_iter() + .map(|s| s.to_string()) + .collect(), + )); + debug_assert_eq!(epoch as usize + 1, forwarding_table_updates.len()); + + store.set_routes(local_table.get_simplified_table()) + }; + + update_routes.await + } else { + // The routing table hasn't changed + Ok(()) + } } /// Send RouteUpdateRequests to all peers that we send routing messages to - fn send_route_updates(&self) -> impl Future { + async fn send_route_updates(&self) -> Result<(), ()> { let self_clone = self.clone(); let unavailable_accounts = self.unavailable_accounts.clone(); // Check which accounts we should skip this iteration @@ -690,95 +686,109 @@ where } skip }; + trace!("Skipping accounts: {:?}", accounts_to_skip); - self.store + let mut accounts = self + .store .get_accounts_to_send_routes_to(accounts_to_skip) - .and_then(move |mut accounts| { - let mut outgoing = self_clone.outgoing.clone(); - let to_epoch_index = self_clone.forwarding_table.read().epoch(); - let from_epoch_index = 
self_clone.last_epoch_updates_sent_for.swap(to_epoch_index, Ordering::SeqCst); - - let route_update_request = - self_clone.create_route_update(from_epoch_index, to_epoch_index); + .await?; + + let to_epoch_index = self_clone.forwarding_table.read().epoch(); + let from_epoch_index = self_clone + .last_epoch_updates_sent_for + .swap(to_epoch_index, Ordering::SeqCst); + + let route_update_request = self_clone.create_route_update(from_epoch_index, to_epoch_index); + + let prepare = route_update_request.to_prepare(); + accounts.sort_unstable_by_key(|a| a.id().to_string()); + accounts.dedup_by_key(|a| a.id()); + + let broadcasting = !accounts.is_empty(); + if broadcasting { + trace!( + "Sending route update for epochs {} - {} to accounts: {:?} {}", + from_epoch_index, + to_epoch_index, + route_update_request, + { + let account_list: Vec = accounts + .iter() + .map(|a| { + format!( + "{} (id: {}, ilp_address: {})", + a.username(), + a.id(), + a.ilp_address() + ) + }) + .collect(); + account_list.join(", ") + } + ); - let prepare = route_update_request.to_prepare(); - accounts.sort_unstable_by_key(|a| a.id().to_string()); - accounts.dedup_by_key(|a| a.id()); + // TODO: How can this be converted to a join_all expression? + // futures 0.1 version worked by doing `join_all(accounts.into_iter().map(...)).and_then(...)` + // It is odd that the same but with `.await` instead does not work. + let mut outgoing = self_clone.outgoing.clone(); + let mut results = Vec::new(); + for account in accounts.into_iter() { + let res = outgoing + .send_request(OutgoingRequest { + from: account.clone(), + to: account.clone(), + original_amount: prepare.amount(), + prepare: prepare.clone(), + }) + .await; + results.push((account, res)); + } - let broadcasting = !accounts.is_empty(); - if broadcasting { - trace!( - "Sending route update for epochs {} - {} to accounts: {:?} {}", - from_epoch_index, - to_epoch_index, - route_update_request, - { - let account_list: Vec = accounts - .iter() - .map(|a| { - format!( - "{} (id: {}, ilp_address: {})", - a.username(), - a.id(), - a.ilp_address() - ) - }) - .collect(); - account_list.join(", ") + // Handle the results of the route broadcast attempts + trace!("Updating unavailable accounts"); + let mut unavailable_accounts = unavailable_accounts.lock(); + for (account, result) in results.into_iter() { + match (account.routing_relation(), result) { + (RoutingRelation::Child, Err(err)) => { + if let Some(backoff) = unavailable_accounts.get_mut(&account.id()) { + // Increase the number of intervals we'll skip + // (but don't overflow the value it's stored in) + backoff.max = backoff.max.saturating_add(1); + backoff.skip_intervals = backoff.max; + } else { + // Skip sending to this account next time + unavailable_accounts.insert( + account.id(), + BackoffParams { + max: 1, + skip_intervals: 1, + }, + ); } - ); - Either::A( - join_all(accounts.into_iter().map(move |account| { - outgoing - .send_request(OutgoingRequest { - from: account.clone(), - to: account.clone(), - original_amount: prepare.amount(), - prepare: prepare.clone(), - }) - .then(move |res| Ok((account, res))) - })) - .and_then(move |results: Vec<(A, Result)>| { - // Handle the results of the route broadcast attempts - trace!("Updating unavailable accounts"); - let mut unavailable_accounts = unavailable_accounts.lock(); - for (account, result) in results.into_iter() { - match (account.routing_relation(), result) { - (RoutingRelation::Child, Err(err)) => { - if let Some(backoff) = unavailable_accounts.get_mut(&account.id()) 
{ - // Increase the number of intervals we'll skip - // (but don't overflow the value it's stored in) - backoff.max = backoff.max.saturating_add(1); - backoff.skip_intervals = backoff.max; - } else { - // Skip sending to this account next time - unavailable_accounts.insert(account.id(), BackoffParams { - max: 1, - skip_intervals: 1, - }); - } - trace!("Error sending route update to {:?} account {} (id: {}), increased backoff to {}: {:?}", - account.routing_relation(), account.username(), account.id(), unavailable_accounts[&account.id()].max, err); - }, - (_, Err(err)) => { - warn!("Error sending route update to {:?} account {} (id: {}): {:?}", - account.routing_relation(), account.username(), account.id(), err); - }, - (_, Ok(_)) => { - if unavailable_accounts.remove(&account.id()).is_some() { - debug!("Account {} (id: {}) is no longer unavailable, resuming route broadcasts", account.username(), account.id()); - } - } - } - } - Ok(()) - }), - ) - } else { - trace!("No accounts to broadcast routes to"); - Either::B(ok(())) + trace!("Error sending route update to {:?} account {} (id: {}), increased backoff to {}: {:?}", + account.routing_relation(), account.username(), account.id(), unavailable_accounts[&account.id()].max, err); + } + (_, Err(err)) => { + warn!( + "Error sending route update to {:?} account {} (id: {}): {:?}", + account.routing_relation(), + account.username(), + account.id(), + err + ); + } + (_, Ok(_)) => { + if unavailable_accounts.remove(&account.id()).is_some() { + debug!("Account {} (id: {}) is no longer unavailable, resuming route broadcasts", account.username(), account.id()); + } + } } - }) + } + Ok(()) + } else { + trace!("No accounts to broadcast routes to"); + Ok(()) + } } /// Create a RouteUpdateRequest representing the given range of Forwarding Routing Table epochs. @@ -852,8 +862,8 @@ where from_epoch_index, to_epoch_index, current_epoch_index, - new_routes: new_routes.clone(), - withdrawn_routes: withdrawn_routes.clone(), + new_routes, + withdrawn_routes, speaker: self.ilp_address.read().clone(), hold_down_time: DEFAULT_ROUTE_EXPIRY_TIME, } @@ -861,12 +871,12 @@ where /// Send a Route Update Request to a specific account for the given epoch range. /// This is used when the peer has fallen behind and has requested a specific range of updates. 
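On the TODO above about restoring join_all: the sequential loop most likely resists a direct translation because send_request takes &mut self, so every mapped future needs its own clone of the outgoing service rather than sharing one mutable borrow. A standalone sketch of the shape that does work with futures 0.3, using stand-in types rather than the crate's services:

use futures::future::join_all;

#[derive(Clone)]
struct Outgoing;

impl Outgoing {
    async fn send_request(&mut self, account: u32) -> Result<(), String> {
        if account == 2 {
            Err("unreachable".to_string())
        } else {
            Ok(())
        }
    }
}

async fn broadcast(outgoing: Outgoing, accounts: Vec<u32>) -> Vec<(u32, Result<(), String>)> {
    // Give each future its own clone of the outgoing service (send_request
    // takes &mut self) and let join_all drive them concurrently; the output
    // preserves the input order
    join_all(accounts.into_iter().map(|account| {
        let mut outgoing = outgoing.clone();
        async move {
            let res = outgoing.send_request(account).await;
            (account, res)
        }
    }))
    .await
}

#[tokio::main]
async fn main() {
    for (account, result) in broadcast(Outgoing, vec![1, 2, 3]).await {
        println!("account {}: {:?}", account, result);
    }
}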
- fn send_route_update( + async fn send_route_update( &self, account: A, from_epoch_index: u32, to_epoch_index: u32, - ) -> impl Future { + ) -> Result<(), ()> { let prepare = self .create_route_update(from_epoch_index, to_epoch_index) .to_prepare(); @@ -875,7 +885,8 @@ where "Sending individual route update to account: {} for epochs from: {} to: {}", account_id, from_epoch_index, to_epoch_index ); - self.outgoing + let result = self + .outgoing .clone() .send_request(OutgoingRequest { from: account.clone(), @@ -883,16 +894,15 @@ where original_amount: prepare.amount(), prepare, }) - .and_then(|_| Ok(())) - .then(move |result| { - if let Err(err) = result { - error!( - "Error sending route update to account {}: {:?}", - account_id, err - ) - } - Ok(()) - }) + .await; + + if let Err(err) = result { + error!( + "Error sending route update to account {}: {:?}", + account_id, err + ) + } + Ok(()) } } @@ -943,24 +953,27 @@ fn get_best_route_for_prefix( (account, route), |(best_account, best_route), (account, route)| { // Prioritize child > peer > parent - if best_account.routing_relation() > account.routing_relation() { - return (best_account, best_route); - } else if best_account.routing_relation() < account.routing_relation() { - return (account, route); - } - - // Prioritize shortest path - if best_route.path.len() < route.path.len() { - return (best_account, best_route); - } else if best_route.path.len() > route.path.len() { - return (account, route); - } - - // Finally base it on account ID - if best_account.id().to_string() < account.id().to_string() { - (best_account, best_route) - } else { - (account, route) + match best_account + .routing_relation() + .cmp(&account.routing_relation()) + { + StdOrdering::Greater => (best_account, best_route), + StdOrdering::Less => (account, route), + _ => { + // Prioritize shortest path + match best_route.path.len().cmp(&route.path.len()) { + StdOrdering::Less => (best_account, best_route), + StdOrdering::Greater => (account, route), + _ => { + // Finally base it on account ID + if best_account.id().to_string() < account.id().to_string() { + (best_account, best_route) + } else { + (account, route) + } + } + } + } } }, ); @@ -970,6 +983,7 @@ fn get_best_route_for_prefix( } } +#[async_trait] impl IncomingService for CcpRouteManager where I: IncomingService + Clone + Send + Sync + 'static, @@ -977,18 +991,16 @@ where S: AddressStore + RouteManagerStore + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// Handle the IncomingRequest if it is a CCP protocol message or /// pass it on to the next handler if not - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let destination = request.prepare.destination(); if destination == *CCP_CONTROL_DESTINATION { - Box::new(self.handle_route_control_request(request)) + self.handle_route_control_request(request).await } else if destination == *CCP_UPDATE_DESTINATION { - Box::new(self.handle_route_update_request(request)) + self.handle_route_update_request(request).await } else { - Box::new(self.next_incoming.handle_request(request)) + self.next_incoming.handle_request(request).await } } } @@ -1030,7 +1042,7 @@ mod ranking_routes { let mut child = TestAccount::new(Uuid::from_slice(&[6; 16]).unwrap(), "example.child"); child.relation = RoutingRelation::Child; child_table.add_route( - child.clone(), + child, Route { prefix: "example.d".to_string(), path: 
vec!["example.one".to_string()], @@ -1059,7 +1071,7 @@ mod ranking_routes { }, ); peer_table_1.add_route( - peer_1.clone(), + peer_1, Route { // This route should be overridden by the configured "example.a" route prefix: "example.a.sub-prefix".to_string(), @@ -1071,7 +1083,7 @@ mod ranking_routes { let mut peer_table_2 = RoutingTable::default(); let peer_2 = TestAccount::new(Uuid::from_slice(&[8; 16]).unwrap(), "example.peer2"); peer_table_2.add_route( - peer_2.clone(), + peer_2, Route { prefix: "example.e".to_string(), path: vec!["example.one".to_string(), "example.two".to_string()], @@ -1141,28 +1153,29 @@ mod handle_route_control_request { use super::*; use crate::fixtures::*; use crate::test_helpers::*; + use interledger_packet::PrepareBuilder; use std::time::{Duration, SystemTime}; - #[test] - fn handles_valid_request() { + #[tokio::test] + async fn handles_valid_request() { test_service_with_routes() .0 .handle_request(IncomingRequest { prepare: CONTROL_REQUEST.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) - .wait() + .await .unwrap(); } - #[test] - fn rejects_from_non_sending_account() { + #[tokio::test] + async fn rejects_from_non_sending_account() { let result = test_service() .handle_request(IncomingRequest { prepare: CONTROL_REQUEST.to_prepare(), from: NON_ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1170,8 +1183,8 @@ mod handle_route_control_request { ); } - #[test] - fn rejects_invalid_packet() { + #[tokio::test] + async fn rejects_invalid_packet() { let result = test_service() .handle_request(IncomingRequest { prepare: PrepareBuilder { @@ -1184,7 +1197,7 @@ mod handle_route_control_request { .build(), from: ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1192,11 +1205,11 @@ mod handle_route_control_request { ); } - #[test] - fn sends_update_in_response() { + #[tokio::test] + async fn sends_update_in_response() { let (mut service, outgoing_requests) = test_service_with_routes(); (*service.forwarding_table.write()).set_id([0; 16]); - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), @@ -1208,7 +1221,7 @@ mod handle_route_control_request { } .to_prepare(), }) - .wait() + .await .unwrap(); let request: &OutgoingRequest = &outgoing_requests.lock()[0]; assert_eq!(request.to.id(), ROUTING_ACCOUNT.id()); @@ -1220,10 +1233,10 @@ mod handle_route_control_request { assert_eq!(update.new_routes.len(), 3); } - #[test] - fn sends_whole_table_if_id_is_different() { + #[tokio::test] + async fn sends_whole_table_if_id_is_different() { let (mut service, outgoing_requests) = test_service_with_routes(); - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), @@ -1235,7 +1248,7 @@ mod handle_route_control_request { } .to_prepare(), }) - .wait() + .await .unwrap(); let routing_table_id = service.forwarding_table.read().id(); let request: &OutgoingRequest = &outgoing_requests.lock()[0]; @@ -1254,13 +1267,14 @@ mod handle_route_update_request { use super::*; use crate::fixtures::*; use crate::test_helpers::*; + use interledger_packet::PrepareBuilder; use std::{ iter::FromIterator, time::{Duration, SystemTime}, }; - #[test] - fn handles_valid_request() { + 
#[tokio::test] + async fn handles_valid_request() { let mut service = test_service(); let mut update = UPDATE_REQUEST_SIMPLE.clone(); update.to_epoch_index = 1; @@ -1271,18 +1285,18 @@ mod handle_route_update_request { prepare: update.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) - .wait() + .await .unwrap(); } - #[test] - fn rejects_from_child_account() { + #[tokio::test] + async fn rejects_from_child_account() { let result = test_service() .handle_request(IncomingRequest { prepare: UPDATE_REQUEST_SIMPLE.to_prepare(), from: CHILD_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1290,14 +1304,14 @@ mod handle_route_update_request { ); } - #[test] - fn rejects_from_non_routing_account() { + #[tokio::test] + async fn rejects_from_non_routing_account() { let result = test_service() .handle_request(IncomingRequest { prepare: UPDATE_REQUEST_SIMPLE.to_prepare(), from: NON_ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1305,8 +1319,8 @@ mod handle_route_update_request { ); } - #[test] - fn rejects_invalid_packet() { + #[tokio::test] + async fn rejects_invalid_packet() { let result = test_service() .handle_request(IncomingRequest { prepare: PrepareBuilder { @@ -1319,7 +1333,7 @@ mod handle_route_update_request { .build(), from: ROUTING_ACCOUNT.clone(), }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), @@ -1327,8 +1341,8 @@ mod handle_route_update_request { ); } - #[test] - fn adds_table_on_first_request() { + #[tokio::test] + async fn adds_table_on_first_request() { let mut service = test_service(); let mut update = UPDATE_REQUEST_SIMPLE.clone(); update.to_epoch_index = 1; @@ -1339,13 +1353,13 @@ mod handle_route_update_request { prepare: update.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) - .wait() + .await .unwrap(); assert_eq!(service.incoming_tables.read().len(), 1); } - #[test] - fn filters_routes_with_other_address_scheme() { + #[tokio::test] + async fn filters_routes_with_other_address_scheme() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1365,8 +1379,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn filters_routes_for_address_scheme() { + #[tokio::test] + async fn filters_routes_for_address_scheme() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1386,8 +1400,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn filters_routing_loops() { + #[tokio::test] + async fn filters_routing_loops() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1411,8 +1425,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn filters_own_prefix_routes() { + #[tokio::test] + async fn filters_own_prefix_routes() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { @@ -1432,8 +1446,8 @@ mod handle_route_update_request { assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } - #[test] - fn updates_local_routing_table() { + #[tokio::test] + async fn 
updates_local_routing_table() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; @@ -1443,7 +1457,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( (*service.local_table.read()) @@ -1463,8 +1477,8 @@ mod handle_route_update_request { ); } - #[test] - fn writes_local_routing_table_to_store() { + #[tokio::test] + async fn writes_local_routing_table_to_store() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; @@ -1474,7 +1488,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( service @@ -1498,8 +1512,8 @@ mod handle_route_update_request { ); } - #[test] - fn doesnt_overwrite_configured_or_local_routes() { + #[tokio::test] + async fn doesnt_overwrite_configured_or_local_routes() { let mut service = test_service(); let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); @@ -1523,7 +1537,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( (*service.local_table.read()) @@ -1543,8 +1557,8 @@ mod handle_route_update_request { ); } - #[test] - fn removes_withdrawn_routes() { + #[tokio::test] + async fn removes_withdrawn_routes() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; @@ -1554,7 +1568,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); service .handle_request(IncomingRequest { @@ -1571,7 +1585,7 @@ mod handle_route_update_request { } .to_prepare(), }) - .wait() + .await .unwrap(); assert_eq!( @@ -1587,8 +1601,8 @@ mod handle_route_update_request { .is_none()); } - #[test] - fn sends_control_request_if_routing_table_id_changed() { + #[tokio::test] + async fn sends_control_request_if_routing_table_id_changed() { let (mut service, outgoing_requests) = test_service_with_routes(); // First request is valid let mut request1 = UPDATE_REQUEST_COMPLEX.clone(); @@ -1599,7 +1613,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request1.to_prepare(), }) - .wait() + .await .unwrap(); // Second has a gap in epochs @@ -1612,7 +1626,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request2.to_prepare(), }) - .wait() + .await .unwrap_err(); assert_eq!(err.code(), ErrorCode::F00_BAD_REQUEST); @@ -1625,8 +1639,8 @@ mod handle_route_update_request { ); } - #[test] - fn sends_control_request_if_missing_epochs() { + #[tokio::test] + async fn sends_control_request_if_missing_epochs() { let (mut service, outgoing_requests) = test_service_with_routes(); // First request is valid @@ -1638,7 +1652,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap(); // Second has a gap in epochs @@ -1650,7 +1664,7 @@ mod handle_route_update_request { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) - .wait() + .await .unwrap_err(); assert_eq!(err.code(), ErrorCode::F00_BAD_REQUEST); @@ -1665,8 +1679,8 @@ mod create_route_update { use super::*; use crate::test_helpers::*; - #[test] - fn heartbeat_message_for_empty_table() { + #[tokio::test] + async fn heartbeat_message_for_empty_table() { let 
service = test_service(); let update = service.create_route_update(0, 0); assert_eq!(update.from_epoch_index, 0); @@ -1678,8 +1692,8 @@ mod create_route_update { assert!(update.withdrawn_routes.is_empty()); } - #[test] - fn includes_the_given_range_of_epochs() { + #[tokio::test] + async fn includes_the_given_range_of_epochs() { let service = test_service(); (*service.forwarding_table.write()).set_epoch(4); *service.forwarding_table_updates.write() = vec![ @@ -1746,10 +1760,10 @@ mod send_route_updates { use interledger_service::*; use std::{collections::HashSet, iter::FromIterator, str::FromStr}; - #[test] - fn broadcasts_to_all_accounts_we_send_updates_to() { + #[tokio::test] + async fn broadcasts_to_all_accounts_we_send_updates_to() { let (service, outgoing_requests) = test_service_with_routes(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let accounts: HashSet = outgoing_requests .lock() .iter() @@ -1765,14 +1779,14 @@ mod send_route_updates { assert_eq!(accounts, expected); } - #[test] - fn broadcasts_configured_and_local_routes() { + #[tokio::test] + async fn broadcasts_configured_and_local_routes() { let (service, outgoing_requests) = test_service_with_routes(); // This is normally spawned as a task when the service is created - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap(); assert_eq!(update.new_routes.len(), 3); let prefixes: Vec<&str> = update @@ -1784,12 +1798,12 @@ mod send_route_updates { assert!(prefixes.contains(&"example.configured.1")); } - #[test] - fn broadcasts_received_routes() { + #[tokio::test] + async fn broadcasts_received_routes() { let (service, outgoing_requests) = test_service_with_routes(); // This is normally spawned as a task when the service is created - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_route_update_request(IncomingRequest { @@ -1811,10 +1825,10 @@ mod send_route_updates { } .to_prepare(), }) - .wait() + .await .unwrap(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap(); assert_eq!(update.new_routes.len(), 4); let prefixes: Vec<&str> = update @@ -1827,13 +1841,13 @@ mod send_route_updates { assert!(prefixes.contains(&"example.remote")); } - #[test] - fn broadcasts_withdrawn_routes() { + #[tokio::test] + async fn broadcasts_withdrawn_routes() { let id10 = Uuid::from_slice(&[10; 16]).unwrap(); let (service, outgoing_requests) = test_service_with_routes(); // This is normally spawned as a task when the service is created - service.update_best_routes(None).wait().unwrap(); + service.update_best_routes(None).await.unwrap(); service .handle_route_update_request(IncomingRequest { @@ -1855,7 +1869,7 @@ mod send_route_updates { } .to_prepare(), }) - .wait() + .await .unwrap(); service .handle_route_update_request(IncomingRequest { @@ -1872,10 +1886,10 @@ mod send_route_updates { } .to_prepare(), }) - .wait() + .await .unwrap(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap(); assert_eq!(update.new_routes.len(), 3); let prefixes: 
Vec<&str> = update @@ -1890,8 +1904,8 @@ mod send_route_updates { assert_eq!(update.withdrawn_routes[0], "example.remote"); } - #[test] - fn backs_off_sending_to_unavailable_child_accounts() { + #[tokio::test] + async fn backs_off_sending_to_unavailable_child_accounts() { let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); let local_routes = HashMap::from_iter(vec![ @@ -1932,18 +1946,18 @@ mod send_route_updates { store, outgoing, incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(Address::from_str("example.connector").unwrap()) .to_service(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // The first time, the child request is rejected assert_eq!(outgoing_requests.lock().len(), 2); @@ -1957,7 +1971,7 @@ mod send_route_updates { } *outgoing_requests.lock() = Vec::new(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // When we send again, we skip the child assert_eq!(outgoing_requests.lock().len(), 1); @@ -1971,7 +1985,7 @@ mod send_route_updates { } *outgoing_requests.lock() = Vec::new(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // When we send again, we try the child but it still won't work assert_eq!(outgoing_requests.lock().len(), 2); @@ -1985,8 +1999,8 @@ mod send_route_updates { } } - #[test] - fn resets_backoff_on_route_control_request() { + #[tokio::test] + async fn resets_backoff_on_route_control_request() { let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); let child_account = TestAccount { @@ -2028,18 +2042,18 @@ mod send_route_updates { store, outgoing, incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(Address::from_str("example.connector").unwrap()) .to_service(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // The first time, the child request is rejected assert_eq!(outgoing_requests.lock().len(), 2); @@ -2057,7 +2071,7 @@ mod send_route_updates { prepare: CONTROL_REQUEST.to_prepare(), from: child_account, }) - .wait() + .await .unwrap(); { let lock = service.unavailable_accounts.lock(); @@ -2065,7 +2079,7 @@ mod send_route_updates { } *outgoing_requests.lock() = Vec::new(); - service.send_route_updates().wait().unwrap(); + service.send_route_updates().await.unwrap(); // When we send again, we don't skip the child because we got a request from them assert_eq!(outgoing_requests.lock().len(), 2); diff --git a/crates/interledger-ccp/src/test_helpers.rs b/crates/interledger-ccp/src/test_helpers.rs index a201c84a4..33a3d5f83 100644 --- a/crates/interledger-ccp/src/test_helpers.rs +++ b/crates/interledger-ccp/src/test_helpers.rs @@ -1,16 +1,12 @@ /* kcov-ignore-start */ use super::*; use crate::{packet::CCP_RESPONSE, server::CcpRouteManager}; -use futures::{ - future::{err, ok}, - Future, -}; +use async_trait::async_trait; use interledger_packet::{Address, ErrorCode, RejectBuilder}; use interledger_service::{ - incoming_service_fn, outgoing_service_fn, AddressStore, BoxedIlpFuture, 
IncomingService, - OutgoingRequest, OutgoingService, Username, + incoming_service_fn, outgoing_service_fn, AddressStore, IncomingService, OutgoingRequest, + OutgoingService, Username, }; -#[cfg(test)] use lazy_static::lazy_static; use parking_lot::Mutex; use std::collections::HashMap; @@ -112,16 +108,14 @@ impl TestStore { type RoutingTable = HashMap; +#[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -131,22 +125,20 @@ impl AddressStore for TestStore { } } +#[async_trait] impl RouteManagerStore for TestStore { type Account = TestAccount; - fn get_local_and_configured_routes( + async fn get_local_and_configured_routes( &self, - ) -> Box< - dyn Future, RoutingTable), Error = ()> - + Send, - > { - Box::new(ok((self.local.clone(), self.configured.clone()))) + ) -> Result<(RoutingTable, RoutingTable), ()> { + Ok((self.local.clone(), self.configured.clone())) } - fn get_accounts_to_send_routes_to( + async fn get_accounts_to_send_routes_to( &self, ignore_accounts: Vec, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let mut accounts: Vec = self .local .values() @@ -158,12 +150,10 @@ impl RouteManagerStore for TestStore { .cloned() .collect(); accounts.dedup_by_key(|a| a.id()); - Box::new(ok(accounts)) + Ok(accounts) } - fn get_accounts_to_receive_routes_from( - &self, - ) -> Box, Error = ()> + Send> { + async fn get_accounts_to_receive_routes_from(&self) -> Result, ()> { let mut accounts: Vec = self .local .values() @@ -173,21 +163,21 @@ impl RouteManagerStore for TestStore { .cloned() .collect(); accounts.dedup_by_key(|a| a.id()); - Box::new(ok(accounts)) + Ok(accounts) } - fn set_routes( + async fn set_routes( &mut self, - routes: impl IntoIterator, - ) -> Box + Send> { + routes: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { *self.routes.lock() = HashMap::from_iter(routes.into_iter()); - Box::new(ok(())) + Ok(()) } } pub fn test_service() -> CcpRouteManager< - impl IncomingService + Clone, - impl OutgoingService + Clone, + impl IncomingService + Clone, + impl OutgoingService + Clone, TestStore, TestAccount, > { @@ -196,22 +186,22 @@ pub fn test_service() -> CcpRouteManager< addr.clone(), TestStore::new(), outgoing_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other outgoing handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(addr) @@ -222,8 +212,8 @@ type OutgoingRequests = Arc>>>; pub fn test_service_with_routes() -> ( CcpRouteManager< - impl IncomingService + Clone, - impl OutgoingService + Clone, + impl IncomingService + Clone, + impl OutgoingService + Clone, TestStore, TestAccount, >, @@ -261,13 +251,13 @@ pub fn test_service_with_routes() -> ( store, outgoing, incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: 
Some(&EXAMPLE_CONNECTOR), } - .build())) + .build()) }), ) .ilp_address(addr) diff --git a/crates/interledger-http/Cargo.toml b/crates/interledger-http/Cargo.toml index 80e660536..a659fe81e 100644 --- a/crates/interledger-http/Cargo.toml +++ b/crates/interledger-http/Cargo.toml @@ -8,23 +8,25 @@ edition = "2018" repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] -bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } +bytes = { version = "0.5", default-features = false } +futures = { version = "0.3", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls"] } url = { version = "2.1.0", default-features = false } -warp = { version = "0.1.20", default-features = false } +warp = { version = "0.2", default-features = false } serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } serde_path_to_error = { version = "0.1", default-features = false } -http = { version = "0.1.18", default-features = false } +http = { version = "0.2.0", default-features = false } chrono = { version = "0.4.9", features = ["clock"], default-features = false } regex = { version ="1.3.1", default-features = false, features = ["std"] } lazy_static = { version ="1.4.0", default-features = false } mime = { version ="0.3.14", default-features = false } -secrecy = "0.5.2" +secrecy = "0.6" +async-trait = "0.1.22" [dev-dependencies] uuid = { version = "0.8.1", features=["v4"]} +tokio = { version = "0.2.6", features = ["rt-core", "macros"]} diff --git a/crates/interledger-http/src/client.rs b/crates/interledger-http/src/client.rs index bb55a11e9..d45700674 100644 --- a/crates/interledger-http/src/client.rs +++ b/crates/interledger-http/src/client.rs @@ -1,20 +1,31 @@ use super::{HttpAccount, HttpStore}; +use async_trait::async_trait; use bytes::BytesMut; -use futures::{future::result, Future, Stream}; -use interledger_packet::{Address, ErrorCode, Fulfill, Packet, Reject, RejectBuilder}; +use futures::future::TryFutureExt; +use interledger_packet::{Address, ErrorCode, Packet, RejectBuilder}; use interledger_service::*; use log::{error, trace}; use reqwest::{ header::{HeaderMap, HeaderName, HeaderValue}, - r#async::{Chunk, Client, ClientBuilder, Response as HttpResponse}, + Client, ClientBuilder, Response as HttpResponse, }; use secrecy::{ExposeSecret, SecretString}; use std::{convert::TryFrom, marker::PhantomData, sync::Arc, time::Duration}; +/// The HttpClientService implements [OutgoingService](../../interledger_service/trait.OutgoingService) +/// for sending ILP Prepare packets over to the HTTP URL associated with the provided account +/// If no [ILP-over-HTTP](https://interledger.org/rfcs/0035-ilp-over-http) URL is specified for +/// the account in the request, then it is forwarded to the next service. #[derive(Clone)] pub struct HttpClientService { + /// An HTTP client configured with a 30 second timeout by default. 
It is used to send the + /// ILP over HTTP messages to the peer client: Client, + /// The store used by the client to get the node's ILP Address, + /// used to populate the `triggered_by` field in Reject packets store: Arc, + /// The next outgoing service to which non ILP-over-HTTP requests should + /// be forwarded to next: O, account_type: PhantomData, } @@ -25,6 +36,7 @@ where O: OutgoingService + Clone, A: HttpAccount, { + /// Constructs the HttpClientService pub fn new(store: S, next: O) -> Self { let mut headers = HeaderMap::with_capacity(2); headers.insert( @@ -46,18 +58,18 @@ where } } +#[async_trait] impl OutgoingService for HttpClientService where - S: AddressStore + HttpStore, - O: OutgoingService, - A: HttpAccount, + S: AddressStore + HttpStore + Clone, + O: OutgoingService + Clone + Sync + Send, + A: HttpAccount + Clone + Sync + Send, { - type Future = BoxedIlpFuture; - /// Send an OutgoingRequest to a peer that implements the ILP-Over-HTTP. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let ilp_address_clone = ilp_address.clone(); + let self_clone = self.clone(); if let Some(url) = request.to.get_http_url() { trace!( "Sending outgoing ILP over HTTP packet to account: {} (URL: {})", @@ -68,51 +80,49 @@ where .to .get_http_auth_token() .unwrap_or_else(|| SecretString::new("".to_owned())); - Box::new( - self.client - .post(url.as_ref()) - .header( - "authorization", - &format!("Bearer {}", token.expose_secret()), - ) - .body(BytesMut::from(request.prepare).freeze()) - .send() - .map_err(move |err| { - error!("Error sending HTTP request: {:?}", err); - let code = if err.is_client_error() { - ErrorCode::F00_BAD_REQUEST - } else { - ErrorCode::T01_PEER_UNREACHABLE - }; - let message = if let Some(status) = err.status() { - format!("Error sending ILP over HTTP request: {}", status) - } else if let Some(err) = err.get_ref() { - format!("Error sending ILP over HTTP request: {:?}", err) - } else { - "Error sending ILP over HTTP request".to_string() - }; - RejectBuilder { - code, - message: message.as_str().as_bytes(), - triggered_by: Some(&ilp_address), - data: &[], + let header = format!("Bearer {}", token.expose_secret()); + let body = request.prepare.as_ref().to_owned(); + let resp = self_clone + .client + .post(url.as_ref()) + .header("authorization", &header) + .body(body) + .send() + .map_err(move |err| { + error!("Error sending HTTP request: {:?}", err); + let mut code = ErrorCode::T01_PEER_UNREACHABLE; + if let Some(status) = err.status() { + if status.is_client_error() { + code = ErrorCode::F00_BAD_REQUEST } - .build() - }) - .and_then(move |resp| parse_packet_from_response(resp, ilp_address_clone)), - ) + }; + + let message = format!("Error sending ILP over HTTP request: {}", err); + RejectBuilder { + code, + message: message.as_bytes(), + triggered_by: Some(&ilp_address), + data: &[], + } + .build() + }) + .await?; + parse_packet_from_response(resp, ilp_address_clone).await } else { - Box::new(self.next.send_request(request)) + self.next.send_request(request).await } } } -fn parse_packet_from_response( - response: HttpResponse, - ilp_address: Address, -) -> impl Future { - let ilp_address_clone = ilp_address.clone(); - result(response.error_for_status().map_err(|err| { +/// Parses an ILP over HTTP response. +/// +/// # Errors +/// 1. If the response's status code is an error +/// 1. 
If the response's body cannot be parsed as bytes +/// 1. If the response's body is not a valid Packet (Fulfill or Reject) +/// 1. If the packet is a Reject packet +async fn parse_packet_from_response(response: HttpResponse, ilp_address: Address) -> IlpResult { + let response = response.error_for_status().map_err(|err| { error!("HTTP error sending ILP over HTTP packet: {:?}", err); let code = if let Some(status) = err.status() { if status.is_client_error() { @@ -131,34 +141,33 @@ fn parse_packet_from_response( data: &[], } .build() - })) - .and_then(move |response: HttpResponse| { - let ilp_address_clone = ilp_address.clone(); - let decoder = response.into_body(); - decoder.concat2().map_err(move |err| { + })?; + + let ilp_address_clone = ilp_address.clone(); + let body = response + .bytes() + .map_err(|err| { error!("Error getting HTTP response body: {:?}", err); RejectBuilder { code: ErrorCode::T01_PEER_UNREACHABLE, message: &[], - triggered_by: Some(&ilp_address_clone.clone()), + triggered_by: Some(&ilp_address_clone), data: &[], } .build() }) - }) - .and_then(move |body: Chunk| { - // TODO can we get the body as a BytesMut so we don't need to copy? - let body = BytesMut::from(body.to_vec()); - match Packet::try_from(body) { - Ok(Packet::Fulfill(fulfill)) => Ok(fulfill), - Ok(Packet::Reject(reject)) => Err(reject), - _ => Err(RejectBuilder { - code: ErrorCode::T01_PEER_UNREACHABLE, - message: &[], - triggered_by: Some(&ilp_address_clone.clone()), - data: &[], - } - .build()), + .await?; + // TODO can we get the body as a BytesMut so we don't need to copy? + let body = BytesMut::from(body.as_ref()); + match Packet::try_from(body) { + Ok(Packet::Fulfill(fulfill)) => Ok(fulfill), + Ok(Packet::Reject(reject)) => Err(reject), + _ => Err(RejectBuilder { + code: ErrorCode::T01_PEER_UNREACHABLE, + message: &[], + triggered_by: Some(&ilp_address_clone), + data: &[], } - }) + .build()), + } } diff --git a/crates/interledger-http/src/error/error_types.rs b/crates/interledger-http/src/error/error_types.rs index b307c2649..63c9c9719 100644 --- a/crates/interledger-http/src/error/error_types.rs +++ b/crates/interledger-http/src/error/error_types.rs @@ -1,37 +1,46 @@ -// APIs should implement their own `ApiErrorType`s to provide more detailed information -// about what were the problem, for example, `JSON_SYNTAX_TYPE` or `ACCOUNT_NOT_FOUND_TYPE`. 
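For reference, the reqwest 0.10 call flow used in client.rs above (send, turn error statuses into Err, then buffer the body) looks roughly like this in isolation; the URL, empty body, and error type here are placeholders, not the crate's packet handling:

use bytes::BytesMut;

async fn fetch(url: &str) -> Result<BytesMut, reqwest::Error> {
    let response = reqwest::Client::new()
        .post(url)
        .body(Vec::new())
        .send()
        .await?
        // turn 4xx/5xx statuses into an Err before touching the body
        .error_for_status()?;
    // bytes() buffers the whole body; copy it into a BytesMut for parsing
    let body = response.bytes().await?;
    Ok(BytesMut::from(body.as_ref()))
}

#[tokio::main]
async fn main() {
    match fetch("http://localhost:7770/ilp").await {
        Ok(body) => println!("got {} bytes", body.len()),
        Err(err) => eprintln!("request failed: {}", err),
    }
}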
- use super::{ApiError, ApiErrorType, ProblemType}; use http::StatusCode; use lazy_static::lazy_static; -// Default errors +// Common HTTP errors + #[allow(dead_code)] +/// 400 Bad Request HTTP Status Code pub const DEFAULT_BAD_REQUEST_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Bad Request", status: StatusCode::BAD_REQUEST, }; + +/// 500 Internal Server Error HTTP Status Code pub const DEFAULT_INTERNAL_SERVER_ERROR_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Internal Server Error", status: StatusCode::INTERNAL_SERVER_ERROR, }; + +/// 401 Unauthorized HTTP Status Code pub const DEFAULT_UNAUTHORIZED_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Unauthorized", status: StatusCode::UNAUTHORIZED, }; + +/// 404 Not Found HTTP Status Code pub const DEFAULT_NOT_FOUND_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Not Found", status: StatusCode::NOT_FOUND, }; + +/// 405 Method Not Allowed HTTP Status Code pub const DEFAULT_METHOD_NOT_ALLOWED_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Method Not Allowed", status: StatusCode::METHOD_NOT_ALLOWED, }; + +/// 409 Conflict HTTP Status Code (used for Idempotency Conflicts) pub const DEFAULT_IDEMPOTENT_CONFLICT_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Provided idempotency key is tied to other input", @@ -39,28 +48,36 @@ pub const DEFAULT_IDEMPOTENT_CONFLICT_TYPE: ApiErrorType = ApiErrorType { }; // ILP over HTTP specific errors + +/// ILP over HTTP invalid packet error type (400 Bad Request) pub const INVALID_ILP_PACKET_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("ilp-over-http/invalid-packet"), title: "Invalid Packet", status: StatusCode::BAD_REQUEST, }; -// JSON deserialization errors +/// Wrong JSON syntax error type (400 Bad Request) pub const JSON_SYNTAX_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("json-syntax"), title: "JSON Syntax Error", status: StatusCode::BAD_REQUEST, }; + +/// Wrong JSON data error type (400 Bad Request) pub const JSON_DATA_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("json-data"), title: "JSON Data Error", status: StatusCode::BAD_REQUEST, }; + +/// JSON EOF error type (400 Bad Request) pub const JSON_EOF_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("json-eof"), title: "JSON Unexpected EOF", status: StatusCode::BAD_REQUEST, }; + +/// JSON IO error type (400 Bad Request) pub const JSON_IO_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("json-io"), title: "JSON IO Error", @@ -68,13 +85,15 @@ pub const JSON_IO_TYPE: ApiErrorType = ApiErrorType { }; // Account specific errors + +/// Account Not Found error type (404 Not Found) pub const ACCOUNT_NOT_FOUND_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("accounts/account-not-found"), title: "Account Not Found", status: StatusCode::NOT_FOUND, }; -// Node settings specific errors +/// Invalid Account Id error type (400 Bad Request) pub const INVALID_ACCOUNT_ID_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::InterledgerHttpApi("settings/invalid-account-id"), title: "Invalid Account Id", @@ -84,7 +103,8 @@ pub const INVALID_ACCOUNT_ID_TYPE: ApiErrorType = ApiErrorType { // String used for idempotency errors pub static IDEMPOTENCY_CONFLICT_ERR: &str = "Provided idempotency key is tied to other input"; -// 
Idempotency errors +/// 409 Conflict HTTP Status Code (used for Idempotency Conflicts) +// TODO: Remove this since it is a duplicate of DEFAULT_IDEMPOTENT_CONFLICT_TYPE pub const IDEMPOTENT_STORE_CALL_ERROR_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Store idempotency error", @@ -92,6 +112,8 @@ pub const IDEMPOTENT_STORE_CALL_ERROR_TYPE: ApiErrorType = ApiErrorType { }; lazy_static! { + /// Error which must be returned when the same idempotency key is + /// used for more than 1 input pub static ref IDEMPOTENT_STORE_CALL_ERROR: ApiError = ApiError::from_api_error_type(&IDEMPOTENT_STORE_CALL_ERROR_TYPE) .detail("Could not process idempotent data in store"); diff --git a/crates/interledger-http/src/error/mod.rs b/crates/interledger-http/src/error/mod.rs index dcdd70581..4c3c3e8ff 100644 --- a/crates/interledger-http/src/error/mod.rs +++ b/crates/interledger-http/src/error/mod.rs @@ -1,3 +1,5 @@ +/// APIs should implement their own `ApiErrorType`s to provide more detailed information +/// about what were the problem, for example, `JSON_SYNTAX_TYPE` or `ACCOUNT_NOT_FOUND_TYPE`. mod error_types; pub use error_types::*; @@ -12,7 +14,12 @@ use std::{ error::Error as StdError, fmt::{self, Display}, }; -use warp::{reject::custom, reply::json, reply::Response, Rejection, Reply}; +use warp::{ + reject::{custom, Reject}, + reply::json, + reply::Response, + Rejection, Reply, +}; /// API error type prefix of problems. /// This URL prefix is currently not published but we assume that in the future. @@ -65,6 +72,7 @@ pub struct ApiError { pub extension_members: Option>, } +/// Distinguishes between RFC7807 and Interledger API Errors #[derive(Clone, Copy, Debug)] pub enum ProblemType { /// `Default` is a [pre-defined value](https://tools.ietf.org/html/rfc7807#section-4.2) which is @@ -76,10 +84,14 @@ pub enum ProblemType { InterledgerHttpApi(&'static str), } +/// Error type used as a basis for creating Warp-compatible Errors #[derive(Clone, Copy, Debug)] pub struct ApiErrorType { + /// Interledger or RFC7807 Error pub r#type: &'static ProblemType, + /// The Title to be used for the error page pub title: &'static str, + /// The HTTP Status Code for the error pub status: http::StatusCode, } @@ -105,6 +117,7 @@ where } impl ApiError { + /// Constructs an API Error from a [ApiErrorType](./struct.ApiErrorType.html) pub fn from_api_error_type(problem_type: &ApiErrorType) -> Self { ApiError { r#type: problem_type.r#type, @@ -118,39 +131,49 @@ impl ApiError { // Note that we should basically avoid using the following default errors because // we should provide more detailed information for developers + #[allow(dead_code)] + /// Returns a Bad Request [ApiError](./struct.ApiError.html) pub fn bad_request() -> Self { ApiError::from_api_error_type(&DEFAULT_BAD_REQUEST_TYPE) } + /// Returns an Internal Server Error [ApiError](./struct.ApiError.html) pub fn internal_server_error() -> Self { ApiError::from_api_error_type(&DEFAULT_INTERNAL_SERVER_ERROR_TYPE) } + /// Returns an Unauthorized [ApiError](./struct.ApiError.html) pub fn unauthorized() -> Self { ApiError::from_api_error_type(&DEFAULT_UNAUTHORIZED_TYPE) } #[allow(dead_code)] + /// Returns an Error Not Found [ApiError](./struct.ApiError.html) pub fn not_found() -> Self { ApiError::from_api_error_type(&DEFAULT_NOT_FOUND_TYPE) } #[allow(dead_code)] + /// Returns a Method Not Found [ApiError](./struct.ApiError.html) pub fn method_not_allowed() -> Self { ApiError::from_api_error_type(&DEFAULT_METHOD_NOT_ALLOWED_TYPE) } + /// Returns an 
Account not Found [ApiError](./struct.ApiError.html) pub fn account_not_found() -> Self { ApiError::from_api_error_type(&ACCOUNT_NOT_FOUND_TYPE) .detail("Username was not found.".to_owned()) } #[allow(dead_code)] + /// Returns an Idempotency Conflict [ApiError](./struct.ApiError.html) + /// via the [default idempotency conflict ApiErrorType](./error_types/constant.DEFAULT_IDEMPOTENT_CONFLICT_TYPE.html) pub fn idempotency_conflict() -> Self { ApiError::from_api_error_type(&DEFAULT_IDEMPOTENT_CONFLICT_TYPE) } + /// Returns an Invalid Account Id [ApiError](./struct.ApiError.html) pub fn invalid_account_id(invalid_account_id: Option<&str>) -> Self { let detail = match invalid_account_id { Some(invalid_account_id) => match invalid_account_id.len() { @@ -162,10 +185,12 @@ impl ApiError { ApiError::from_api_error_type(&INVALID_ACCOUNT_ID_TYPE).detail(detail) } + /// Returns an Invalid ILP over HTTP [ApiError](./struct.ApiError.html) pub fn invalid_ilp_packet() -> Self { ApiError::from_api_error_type(&INVALID_ILP_PACKET_TYPE) } + /// Sets the [`detail`](./struct.ApiError.html#structfield.detail) field pub fn detail(mut self, detail: T) -> Self where T: Into, @@ -175,6 +200,7 @@ impl ApiError { } #[allow(dead_code)] + /// Sets the [`instance`](./struct.ApiError.html#structfield.instance) field pub fn instance(mut self, instance: T) -> Self where T: Into, @@ -183,8 +209,9 @@ impl ApiError { self } - pub fn extension_members(mut self, extension_members: Option>) -> Self { - self.extension_members = extension_members; + /// Sets the [`extension_members`](./struct.ApiError.html#structfield.extension_members) field + pub fn extension_members(mut self, extension_members: Map) -> Self { + self.extension_members = Some(extension_members); self } @@ -236,6 +263,8 @@ impl From for Rejection { } } +impl Reject for ApiError {} + lazy_static! { static ref MISSING_FIELD_REGEX: Regex = Regex::new("missing field `(.*)`").unwrap(); } @@ -248,6 +277,7 @@ pub struct JsonDeserializeError { } impl StdError for JsonDeserializeError {} +impl Reject for JsonDeserializeError {} impl Display for JsonDeserializeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -290,15 +320,14 @@ impl Reply for JsonDeserializeError { Category::Io => &JSON_IO_TYPE, }; let detail = self.detail; - let extension_members = match extension_members.keys().len() { - 0 => None, - _ => Some(extension_members), - }; - ApiError::from_api_error_type(api_error_type) - .detail(detail) - .extension_members(extension_members) - .into_response() + let mut error = ApiError::from_api_error_type(api_error_type).detail(detail); + + if extension_members.keys().len() > 0 { + error = error.extension_members(extension_members); + } + + error.into_response() } } @@ -309,12 +338,12 @@ impl From for Rejection { } // Receives `ApiError`s and `JsonDeserializeError` and return it in the RFC7807 format. 
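
The `impl Reject for ApiError {}` marker added in this hunk is what lets warp 0.2 carry these RFC7807 errors through its rejection system. A minimal sketch of that pattern, assuming only warp 0.2 (the `DemoError` type and its fields are illustrative stand-ins, not part of this patch):

use warp::{reject::{custom, Reject}, Rejection};

// Hypothetical error type; in this patch the role is played by `ApiError`.
#[derive(Clone, Debug)]
struct DemoError {
    title: &'static str,
    status: u16,
}

// `Reject` is a marker trait; it only requires Debug + Send + Sync + 'static.
impl Reject for DemoError {}

fn reject_with_demo_error() -> Rejection {
    // `custom` wraps the error so it can later be recovered with `find::<DemoError>()`.
    custom(DemoError { title: "Bad Request", status: 400 })
}

fn main() {
    let _rejection = reject_with_demo_error();
}
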
-pub fn default_rejection_handler(err: warp::Rejection) -> Result { - if let Some(api_error) = err.find_cause::() { +pub async fn default_rejection_handler(err: warp::Rejection) -> Result { + if let Some(api_error) = err.find::() { Ok(api_error.clone().into_response()) - } else if let Some(json_error) = err.find_cause::() { + } else if let Some(json_error) = err.find::() { Ok(json_error.clone().into_response()) - } else if err.status() == http::status::StatusCode::METHOD_NOT_ALLOWED { + } else if err.find::().is_some() { Ok(ApiError::from_api_error_type(&DEFAULT_METHOD_NOT_ALLOWED_TYPE).into_response()) } else { Err(err) diff --git a/crates/interledger-http/src/lib.rs b/crates/interledger-http/src/lib.rs index 29eec8b03..3a8734afd 100644 --- a/crates/interledger-http/src/lib.rs +++ b/crates/interledger-http/src/lib.rs @@ -2,76 +2,87 @@ //! //! Client and server implementations of the [ILP-Over-HTTP](https://github.com/interledger/rfcs/blob/master/0035-ilp-over-http/0035-ilp-over-http.md) bilateral communication protocol. //! This protocol is intended primarily for server-to-server communication between peers on the Interledger network. -use bytes::Buf; -use error::*; -use futures::Future; +use async_trait::async_trait; +use bytes::Bytes; use interledger_service::{Account, Username}; use mime::Mime; use secrecy::SecretString; use serde::de::DeserializeOwned; use url::Url; -use warp::{self, filters::body::FullBody, Filter, Rejection}; +use warp::{self, Filter, Rejection}; +/// [ILP over HTTP](https://interledger.org/rfcs/0035-ilp-over-http/) Outgoing Service mod client; -mod server; - -// So that settlement engines can use errors +/// [RFC7807](https://tools.ietf.org/html/rfc7807) compliant errors pub mod error; +/// [ILP over HTTP](https://interledger.org/rfcs/0035-ilp-over-http/) API (implemented with [Warp](https://docs.rs/warp/0.2.0/warp/)) +mod server; pub use self::client::HttpClientService; pub use self::server::HttpServer; +/// Extension trait for [Account](../interledger_service/trait.Account.html) with [ILP over HTTP](https://interledger.org/rfcs/0035-ilp-over-http/) related information pub trait HttpAccount: Account { + /// Returns the HTTP URL corresponding to this account fn get_http_url(&self) -> Option<&Url>; + /// Returns the HTTP token which is sent as an HTTP header on each ILP over HTTP request fn get_http_auth_token(&self) -> Option; } /// The interface for Stores that can be used with the HttpServerService. // TODO do we need all of these constraints? +#[async_trait] pub trait HttpStore: Clone + Send + Sync + 'static { type Account: HttpAccount; /// Load account details based on the full HTTP Authorization header /// received on the incoming HTTP request. - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send>; + ) -> Result; } +// TODO: Do we really need this custom deserialization function? +// You'd expect that Serde would be able to handle this. 
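
For context on how an async rejection handler like the rewritten `default_rejection_handler` gets wired up, a warp 0.2 `recover` chain looks roughly like the following sketch (the `DemoError` type, route, and message are illustrative only):

use warp::{http::StatusCode, reject::Reject, Filter, Rejection, Reply};

#[derive(Clone, Debug)]
struct DemoError;
impl Reject for DemoError {}

// Async handlers can be passed directly to `recover` in warp 0.2.
async fn handle_rejection(err: Rejection) -> Result<impl Reply, Rejection> {
    if err.find::<DemoError>().is_some() {
        // Turn our custom rejection into a concrete HTTP response.
        Ok(warp::reply::with_status("demo error", StatusCode::BAD_REQUEST))
    } else {
        // Anything we do not recognize falls through to warp's default handling.
        Err(err)
    }
}

#[tokio::main]
async fn main() {
    let route = warp::path("fail")
        .and_then(|| async { Err::<String, Rejection>(warp::reject::custom(DemoError)) })
        .recover(handle_rejection);
    warp::serve(route).run(([127, 0, 0, 1], 3030)).await;
}
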
+/// Helper function to deserialize JSON inside Warp +/// The content-type MUST be application/json and if a charset +/// is specified, it MUST be UTF-8 pub fn deserialize_json( ) -> impl Filter + Copy { warp::header::("content-type") - .and(warp::body::concat()) - .and_then(|content_type: String, buf: FullBody| { - let mime_type: Mime = content_type.parse().map_err::(|_| { - error::ApiError::bad_request() - .detail("Invalid content-type header.") - .into() - })?; - if mime_type.type_() != mime::APPLICATION_JSON.type_() { - return Err(error::ApiError::bad_request() - .detail("Invalid content-type.") - .into()); - } else if let Some(charset) = mime_type.get_param("charset") { - // Charset should be UTF-8 - // https://tools.ietf.org/html/rfc8259#section-8.1 - if charset != mime::UTF_8 { - return Err(error::ApiError::bad_request() - .detail("Charset should be UTF-8.") - .into()); + .and(warp::body::bytes()) + .and_then(|content_type: String, buf: Bytes| { + async move { + let mime_type: Mime = content_type.parse().map_err(|_| { + Rejection::from( + error::ApiError::bad_request().detail("Invalid content-type header."), + ) + })?; + if mime_type.type_() != mime::APPLICATION_JSON.type_() { + return Err(Rejection::from( + error::ApiError::bad_request().detail("Invalid content-type."), + )); + } else if let Some(charset) = mime_type.get_param("charset") { + // Charset should be UTF-8 + // https://tools.ietf.org/html/rfc8259#section-8.1 + if charset != mime::UTF_8 { + return Err(Rejection::from( + error::ApiError::bad_request().detail("Charset should be UTF-8."), + )); + } } - } - let deserializer = &mut serde_json::Deserializer::from_slice(&buf.bytes()); - serde_path_to_error::deserialize(deserializer).map_err(|err| { - warp::reject::custom(JsonDeserializeError { - category: err.inner().classify(), - detail: err.inner().to_string(), - path: err.path().clone(), + let deserializer = &mut serde_json::Deserializer::from_slice(&buf); + serde_path_to_error::deserialize(deserializer).map_err(|err| { + warp::reject::custom(error::JsonDeserializeError { + category: err.inner().classify(), + detail: err.inner().to_string(), + path: err.path().clone(), + }) }) - }) + } }) } @@ -86,49 +97,57 @@ mod tests { string_value: String, } - #[test] - fn deserialize_json_header() { + #[tokio::test] + async fn deserialize_json_header() { let json_filter = deserialize_json::(); let body_correct = r#"{"string_value": "some string value"}"#; let body_incorrect = r#"{"other_key": 0}"#; // `content-type` should be provided. 
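
Stripped of the content-type checks, the shape of the rewritten filter is roughly the following (assuming warp 0.2, serde, and serde_json; `DemoBody` is a placeholder that mirrors the test struct, not a real type in this crate):

use bytes::Bytes;
use serde::Deserialize;
use warp::{Filter, Rejection};

#[derive(Debug, Deserialize)]
struct DemoBody {
    string_value: String,
}

// Extracts the raw body as Bytes and deserializes it inside an async block,
// rejecting the request if the JSON does not match `DemoBody`.
fn demo_json_body() -> impl Filter<Extract = (DemoBody,), Error = Rejection> + Copy {
    warp::body::bytes().and_then(|buf: Bytes| async move {
        serde_json::from_slice::<DemoBody>(&buf).map_err(|_| warp::reject::reject())
    })
}
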
- assert_eq!(request().body(body_correct).matches(&json_filter), false); + assert_eq!( + request().body(body_correct).matches(&json_filter).await, + false + ); // Should accept only "application/json" or "application/json; charset=utf-8" assert_eq!( request() .body(body_correct) .header("content-type", "text/plain") - .matches(&json_filter), + .matches(&json_filter) + .await, false ); assert_eq!( request() .body(body_correct) .header("content-type", "application/json") - .matches(&json_filter), + .matches(&json_filter) + .await, true ); assert_eq!( request() .body(body_correct) .header("content-type", "application/json; charset=ascii") - .matches(&json_filter), + .matches(&json_filter) + .await, false ); assert_eq!( request() .body(body_correct) .header("content-type", "application/json; charset=utf-8") - .matches(&json_filter), + .matches(&json_filter) + .await, true ); assert_eq!( request() .body(body_correct) .header("content-type", "application/json; charset=UTF-8") - .matches(&json_filter), + .matches(&json_filter) + .await, true ); @@ -137,14 +156,16 @@ mod tests { request() .body(body_incorrect) .header("content-type", "application/json") - .matches(&json_filter), + .matches(&json_filter) + .await, false ); assert_eq!( request() .body(body_incorrect) .header("content-type", "application/json; charset=utf-8") - .matches(&json_filter), + .matches(&json_filter) + .await, false ); } diff --git a/crates/interledger-http/src/server.rs b/crates/interledger-http/src/server.rs index 1700a1f3e..8954d1a52 100644 --- a/crates/interledger-http/src/server.rs +++ b/crates/interledger-http/src/server.rs @@ -1,105 +1,144 @@ use super::{error::*, HttpStore}; -use bytes::{buf::Buf, Bytes, BytesMut}; -use futures::{ - future::{err, Either, FutureResult}, - Future, -}; +use bytes::{Bytes, BytesMut}; +use futures::TryFutureExt; use interledger_packet::Prepare; use interledger_service::Username; use interledger_service::{IncomingRequest, IncomingService}; use log::error; use secrecy::{ExposeSecret, SecretString}; -use std::{convert::TryFrom, net::SocketAddr}; -use warp::{self, Filter, Rejection}; +use std::convert::TryFrom; +use std::net::SocketAddr; +use warp::{Filter, Rejection}; /// Max message size that is allowed to transfer from a request or a message. pub const MAX_PACKET_SIZE: u64 = 40000; +/// The offset after which the bearer token should be in an ILP over HTTP request +/// e.g. in `token = "Bearer: MyAuthToken"`, `MyAuthToken` can be taken via token[BEARER_TOKEN_START..] pub const BEARER_TOKEN_START: usize = 7; /// A warp filter that parses incoming ILP-Over-HTTP requests, validates the authorization, /// and passes the request to an IncomingService handler. 
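
As a reference for the test changes in this hunk, driving a filter with warp's test helpers under the async runtime looks roughly like this (the filter and route are illustrative, not the real ILP endpoint):

#[cfg(test)]
mod demo_tests {
    use warp::Filter;

    #[tokio::test]
    async fn matches_demo_route() {
        let filter = warp::post().and(warp::path("ilp"));
        // `matches` and `reply` are async in warp 0.2, so they must be awaited.
        let matched = warp::test::request()
            .method("POST")
            .path("/ilp")
            .matches(&filter)
            .await;
        assert!(matched);
    }
}
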
#[derive(Clone)] pub struct HttpServer { + /// The next [incoming service](../interledger_service/trait.IncomingService.html) incoming: I, + /// A store which implements [`HttpStore`](trait.HttpStore.html) store: S, } -impl HttpServer +#[inline] +/// Returns the account which matches the provided username/password combination +/// from the store, or returns an error if the account was not found or if the +/// credentials were incorrect +async fn get_account( + store: S, + path_username: &Username, + password: &SecretString, +) -> Result where - I: IncomingService + Clone + Send + Sync + 'static, S: HttpStore, +{ + if password.expose_secret().len() < BEARER_TOKEN_START { + return Err(()); + } + store + .get_account_from_http_auth( + &path_username, + &password.expose_secret()[BEARER_TOKEN_START..], + ) + .await +} + +#[inline] +/// Implements ILP over HTTP. If account authentication is valid +/// and the provided packet can be parsed as a +/// [Prepare](../../interledger_packet/struct.Prepare.html) packet, +/// then it is forwarded to the next incoming service which will return +/// an Ok result if the response is a [Fulfill](../../interledger_packet/struct.Fulfill.html). +/// +/// # Errors +/// 1. Unauthorized account if invalid credentials are provided +/// 1. The provided `body` could not be parsed as a Prepare packet +/// 1. A Reject packet was returned by the next incoming service +async fn ilp_over_http( + path_username: Username, + password: SecretString, + body: Bytes, + store: S, + incoming: I, +) -> Result +where + S: HttpStore, + I: IncomingService + Clone, +{ + let mut incoming = incoming.clone(); + let account = get_account(store, &path_username, &password) + .map_err(|_| -> Rejection { + error!("Invalid authorization provided for user: {}", path_username); + ApiError::unauthorized().into() + }) + .await?; + + let buffer = bytes::BytesMut::from(body.as_ref()); + if let Ok(prepare) = Prepare::try_from(buffer) { + let result = incoming + .handle_request(IncomingRequest { + from: account, + prepare, + }) + .await; + + let bytes: BytesMut = match result { + Ok(fulfill) => fulfill.into(), + Err(reject) => reject.into(), + }; + + Ok(warp::http::Response::builder() + .header("Content-Type", "application/octet-stream") + .status(200) + .body(bytes.freeze()) // TODO: bring this back + .unwrap()) + } else { + error!("Body was not a valid Prepare packet"); + Err(Rejection::from(ApiError::invalid_ilp_packet())) + } +} + +impl HttpServer +where + I: IncomingService + Clone + Send + Sync, + S: HttpStore + Clone, { pub fn new(incoming: I, store: S) -> Self { HttpServer { incoming, store } } + /// Returns a Warp filter which exposes per-account endpoints for [ILP over HTTP](https://interledger.org/rfcs/0035-ilp-over-http/). + /// The endpoint is /accounts/:username/ilp. 
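
The `with_store`/`with_incoming` helpers built inside `as_filter` follow warp's usual pattern for injecting cloned state into an `and_then` handler; a minimal sketch of that pattern with a placeholder state type:

use warp::{Filter, Rejection};

#[derive(Clone)]
struct DemoStore {
    greeting: &'static str,
}

async fn hello(name: String, store: DemoStore) -> Result<String, Rejection> {
    Ok(format!("{}, {}!", store.greeting, name))
}

fn demo_routes(store: DemoStore) -> impl Filter<Extract = (String,), Error = Rejection> + Clone {
    // Clone the state into every request so the async handler can take it by value.
    let with_store = warp::any().map(move || store.clone());
    warp::path!("hello" / String).and(with_store).and_then(hello)
}
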
pub fn as_filter( &self, - ) -> impl warp::Filter,), Error = warp::Rejection> + Clone - { - let incoming = self.incoming.clone(); + ) -> impl warp::Filter + Clone { let store = self.store.clone(); - - warp::post2() + let incoming = self.incoming.clone(); + let with_store = warp::any().map(move || store.clone()).boxed(); + let with_incoming = warp::any().map(move || incoming.clone()); + warp::post() .and(warp::path("accounts")) - .and(warp::path::param2::()) + .and(warp::path::param::()) .and(warp::path("ilp")) .and(warp::path::end()) .and(warp::header::("authorization")) - .and_then(move |path_username: Username, password: SecretString| { - if password.expose_secret().len() < BEARER_TOKEN_START { - return Either::A(err(ApiError::bad_request().into())); - } - Either::B( - store - .get_account_from_http_auth( - &path_username, - &password.expose_secret()[BEARER_TOKEN_START..], - ) - .map_err(move |_| -> Rejection { - error!("Invalid authorization provided for user: {}", path_username); - ApiError::unauthorized().into() - }), - ) - }) .and(warp::body::content_length_limit(MAX_PACKET_SIZE)) - .and(warp::body::concat()) - .and_then( - move |account: S::Account, - body: warp::body::FullBody| - -> Either<_, FutureResult<_, Rejection>> { - // TODO don't copy ILP packet - let buffer = BytesMut::from(body.bytes()); - if let Ok(prepare) = Prepare::try_from(buffer) { - Either::A( - incoming - .clone() - .handle_request(IncomingRequest { - from: account, - prepare, - }) - .then(|result| { - let bytes: BytesMut = match result { - Ok(fulfill) => fulfill.into(), - Err(reject) => reject.into(), - }; - Ok(warp::http::Response::builder() - .header("Content-Type", "application/octet-stream") - .status(200) - .body(bytes.freeze()) - .unwrap()) - }), - ) - } else { - error!("Body was not a valid Prepare packet"); - Either::B(err(ApiError::invalid_ilp_packet().into())) - } - }, - ) + .and(warp::body::bytes()) + .and(with_store) + .and(with_incoming) + .and_then(ilp_over_http) } - pub fn bind(&self, addr: SocketAddr) -> impl Future + Send { - warp::serve(self.as_filter()).bind(addr) + // Do we really need to bind self to static? 
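
For completeness, this is roughly how such a filter ends up being served under tokio 0.2 (the address and route here are placeholders, not what the node binary actually binds):

use std::net::SocketAddr;

#[tokio::main]
async fn main() {
    let addr: SocketAddr = ([127, 0, 0, 1], 7770).into();
    // Any filter whose extracted value implements Reply can be served directly.
    let filter = warp::path("health").map(|| "OK");
    warp::serve(filter).run(addr).await;
}
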
+ pub async fn bind(&'static self, addr: SocketAddr) { + let filter = self.as_filter(); + warp::serve(filter).run(addr).await } } @@ -107,8 +146,8 @@ where mod tests { use super::*; use crate::HttpAccount; - use bytes::{Bytes, BytesMut}; - use futures::future::ok; + use async_trait::async_trait; + use bytes::BytesMut; use http::Response; use interledger_packet::{Address, ErrorCode, PrepareBuilder, RejectBuilder}; use interledger_service::{incoming_service_fn, Account}; @@ -136,7 +175,7 @@ mod tests { } const AUTH_PASSWORD: &str = "password"; - fn api_call( + async fn api_call( api: &F, endpoint: &str, // /ilp or /accounts/:username/ilp auth: &str, // simple bearer or overloaded username+password @@ -152,19 +191,20 @@ mod tests { .header("Content-length", 1000) .body(PREPARE_BYTES.clone()) .reply(api) + .await } - #[test] - fn new_api_test() { + #[tokio::test] + async fn new_api_test() { let store = TestStore; let incoming = incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: None, } - .build())) + .build()) }); let api = HttpServer::new(incoming, store) .as_filter() @@ -175,11 +215,12 @@ mod tests { &api, "/accounts/alice/ilp", &format!("{}:{}", USERNAME.to_string(), AUTH_PASSWORD), - ); + ) + .await; assert_eq!(resp.status().as_u16(), 401); // Works with just the password - let resp = api_call(&api, "/accounts/alice/ilp", AUTH_PASSWORD); + let resp = api_call(&api, "/accounts/alice/ilp", AUTH_PASSWORD).await; assert_eq!(resp.status().as_u16(), 200); } @@ -218,17 +259,18 @@ mod tests { #[derive(Debug, Clone)] struct TestStore; + #[async_trait] impl HttpStore for TestStore { type Account = TestAccount; - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { if username == &*USERNAME && token == AUTH_PASSWORD { - Box::new(ok(TestAccount)) + Ok(TestAccount) } else { - Box::new(err(())) + Err(()) } } } diff --git a/crates/interledger-service-util/Cargo.toml b/crates/interledger-service-util/Cargo.toml index 8af5d60fe..bdce10aa9 100644 --- a/crates/interledger-service-util/Cargo.toml +++ b/crates/interledger-service-util/Cargo.toml @@ -8,22 +8,23 @@ edition = "2018" repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] -bytes = { version = "0.4.12", default-features = false } +bytes = { version = "0.5", default-features = false } byteorder = { version = "1.3.2", default-features = false } chrono = { version = "0.4.9", default-features = false, features = ["clock"] } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false } hex = { version = "0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } interledger-settlement = { path = "../interledger-settlement", version = "^0.3.0", default-features = false, features = ["settlement_api"] } lazy_static = { version = "1.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10.0", default-features = false, features = ["default-tls"] } ring = { version = "0.16.9", default-features = false } -secrecy = { version = 
"0.5.1", default-features = false, features = ["alloc", "serde"] } +secrecy = { version = "0.6", default-features = false, features = ["alloc", "serde"] } serde = { version = "1.0.101", default-features = false, features = ["derive"]} -tokio = { version = "0.1.22", default-features = false } -tokio-executor = { version = "0.1.8", default-features = false } +tokio = { version = "0.2.6", default-features = false, features = ["macros", "time"] } +async-trait = "0.1.22" [dev-dependencies] -uuid = { version = "0.8.1", default-features = false} \ No newline at end of file +uuid = { version = "0.8.1", default-features = false} +bytes04 = { package = "bytes", version = "0.4", default-features = false } diff --git a/crates/interledger-service-util/src/balance_service.rs b/crates/interledger-service-util/src/balance_service.rs index 93103a2c0..2d5c34186 100644 --- a/crates/interledger-service-util/src/balance_service.rs +++ b/crates/interledger-service-util/src/balance_service.rs @@ -1,5 +1,6 @@ -use futures::Future; -use interledger_packet::{ErrorCode, Fulfill, Reject, RejectBuilder}; +use async_trait::async_trait; +use futures::TryFutureExt; +use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use interledger_settlement::{ api::SettlementClient, @@ -7,32 +8,35 @@ use interledger_settlement::{ }; use log::{debug, error}; use std::marker::PhantomData; -use tokio_executor::spawn; +// TODO: Remove AccountStore dependency, use `AccountId: ToString` as associated type +/// Trait responsible for managing an account's balance in the store +/// as ILP Packets get routed +#[async_trait] pub trait BalanceStore: AccountStore { /// Fetch the current balance for the given account. - fn get_balance(&self, account: Self::Account) - -> Box + Send>; + async fn get_balance(&self, account: Self::Account) -> Result; - fn update_balances_for_prepare( + /// Decreases the sending account's balance before forwarding out a prepare packet + async fn update_balances_for_prepare( &self, from_account: Self::Account, incoming_amount: u64, - ) -> Box + Send>; + ) -> Result<(), ()>; - /// Increases the account's balance, and returns the updated balance + /// Increases the receiving account's balance, and returns the updated balance /// along with the amount which should be settled - fn update_balances_for_fulfill( + async fn update_balances_for_fulfill( &self, to_account: Self::Account, outgoing_amount: u64, - ) -> Box + Send>; + ) -> Result<(i64, u64), ()>; - fn update_balances_for_reject( + async fn update_balances_for_reject( &self, from_account: Self::Account, incoming_amount: u64, - ) -> Box + Send>; + ) -> Result<(), ()>; } /// # Balance Service @@ -64,6 +68,7 @@ where } } +#[async_trait] impl OutgoingService for BalanceService where S: AddressStore @@ -74,10 +79,8 @@ where + Sync + 'static, O: OutgoingService + Send + Clone + 'static, - A: SettlementAccount + 'static, + A: SettlementAccount + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On send message: /// 1. Calls `store.update_balances_for_prepare` with the prepare. /// If it fails, it replies with a reject @@ -86,21 +89,17 @@ where /// INDEPENDENTLY of if the call suceeds or fails. 
This makes a `sendMoney` call if the fulfill puts the account's balance over the `settle_threshold` /// - if it returns an reject calls `store.update_balances_for_reject` and replies with the fulfill /// INDEPENDENTLY of if the call suceeds or fails - fn send_request( - &mut self, - request: OutgoingRequest, - ) -> Box + Send> { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { // Don't bother touching the store for zero-amount packets. // Note that it is possible for the original_amount to be >0 while the // prepare.amount is 0, because the original amount could be rounded down // to 0 when exchange rate and scale change are applied. if request.prepare.amount() == 0 && request.original_amount == 0 { - return Box::new(self.next.send_request(request)); + return self.next.send_request(request).await; } let mut next = self.next.clone(); let store = self.store.clone(); - let store_clone = store.clone(); let from = request.from.clone(); let from_clone = from.clone(); let from_id = from.id(); @@ -123,79 +122,92 @@ where // _eventually_ be completed. Because of this settlement_engine guarantee, the Connector can // operate as-if the settlement engine has completed. Finally, if the request to the settlement-engine // fails, this amount will be re-added back to balance. - Box::new( - self.store - .update_balances_for_prepare( - from.clone(), - incoming_amount, - ) - .map_err(move |_| { - debug!("Rejecting packet because it would exceed a balance limit"); - RejectBuilder { - code: ErrorCode::T04_INSUFFICIENT_LIQUIDITY, - message: &[], - triggered_by: Some(&ilp_address), - data: &[], - } - .build() - }) - .and_then(move |_| { - next.send_request(request) - .and_then(move |fulfill| { - // We will spawn a task to update the balances in the database - // so that we DO NOT wait for the database before sending the - // Fulfill packet back to our peer. Due to how the flow of ILP - // packets work, once we get the Fulfill back from the next node - // we need to propagate it backwards ASAP. If we do not give the - // previous node the fulfillment in time, they won't pay us back - // for the packet we forwarded. Note this means that we will - // relay the fulfillment _even if saving to the DB fails._ - let fulfill_balance_update = store.update_balances_for_fulfill( - to.clone(), - outgoing_amount, - ) - .map_err(move |_| error!("Error applying balance changes for fulfill from account: {} to account: {}. Incoming amount was: {}, outgoing amount was: {}", from_id, to_id, incoming_amount, outgoing_amount)) - .and_then(move |(balance, amount_to_settle)| { - debug!("Account balance after fulfill: {}. Amount that needs to be settled: {}", balance, amount_to_settle); - if amount_to_settle > 0 && to_has_engine { - // Note that if this program crashes after changing the balance (in the PROCESS_FULFILL script) - // and the send_settlement fails but the program isn't alive to hear that, the balance will be incorrect. - // No other instance will know that it was trying to send an outgoing settlement. We could - // make this more robust by saving something to the DB about the outgoing settlement when we change the balance - // but then we would also need to prevent a situation where every connector instance is polling the - // settlement engine for the status of each - // outgoing settlement and putting unnecessary - // load on the settlement engine. 
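
The conversion pattern used throughout this hunk, where a futures 0.1 `map_err`/`and_then` chain becomes sequential `.await`s with `?`, reduces to something like the sketch below (the store-like functions are stand-ins, not the real `BalanceStore` API):

use futures::TryFutureExt;

// Stand-ins for store calls; the real ones live behind #[async_trait].
async fn debit(amount: u64) -> Result<(), ()> {
    if amount == 0 { Err(()) } else { Ok(()) }
}

async fn forward(amount: u64) -> Result<u64, String> {
    Ok(amount)
}

async fn send(amount: u64) -> Result<u64, String> {
    // Old style: debit(amount).map_err(...).and_then(|_| forward(amount)).
    // New style: map the error, propagate with `?`, then await the next call.
    debit(amount)
        .map_err(|_| String::from("would exceed balance limit"))
        .await?;
    forward(amount).await
}

#[tokio::main]
async fn main() {
    assert_eq!(send(10).await, Ok(10));
}
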
- spawn(settlement_client - .send_settlement(to, amount_to_settle) - .or_else(move |_| store.refund_settlement(to_id, amount_to_settle))); - } - Ok(()) - }); + self.store + .update_balances_for_prepare(from.clone(), incoming_amount) + .map_err(move |_| { + debug!("Rejecting packet because it would exceed a balance limit"); + RejectBuilder { + code: ErrorCode::T04_INSUFFICIENT_LIQUIDITY, + message: &[], + triggered_by: Some(&ilp_address), + data: &[], + } + .build() + }) + .await?; - spawn(fulfill_balance_update); + match next.send_request(request).await { + Ok(fulfill) => { + // We will spawn a task to update the balances in the database + // so that we DO NOT wait for the database before sending the + // Fulfill packet back to our peer. Due to how the flow of ILP + // packets work, once we get the Fulfill back from the next node + // we need to propagate it backwards ASAP. If we do not give the + // previous node the fulfillment in time, they won't pay us back + // for the packet we forwarded. Note this means that we will + // relay the fulfillment _even if saving to the DB fails._ + tokio::spawn(async move { + let (balance, amount_to_settle) = match store + .update_balances_for_fulfill(to.clone(), outgoing_amount) + .await + { + Ok(r) => r, + Err(_) => { + error!("Error applying balance changes for fulfill from account: {} to account: {}. Incoming amount was: {}, outgoing amount was: {}", from_id, to_id, incoming_amount, outgoing_amount); + return Err(()); + } + }; + debug!( + "Account balance after fulfill: {}. Amount that needs to be settled: {}", + balance, amount_to_settle + ); + if amount_to_settle > 0 && to_has_engine { + // Note that if this program crashes after changing the balance (in the PROCESS_FULFILL script) + // and the send_settlement fails but the program isn't alive to hear that, the balance will be incorrect. + // No other instance will know that it was trying to send an outgoing settlement. We could + // make this more robust by saving something to the DB about the outgoing settlement when we change the balance + // but then we would also need to prevent a situation where every connector instance is polling the + // settlement engine for the status of each + // outgoing settlement and putting unnecessary + // load on the settlement engine. + tokio::spawn(async move { + if settlement_client + .send_settlement(to, amount_to_settle) + .await + .is_err() + { + store.refund_settlement(to_id, amount_to_settle).await?; + } + Ok::<(), ()>(()) + }); + } + Ok(()) + }); - Ok(fulfill) - }) - .or_else(move |reject| { - // Similar to the logic for handling the Fulfill packet above, we - // spawn a task to update the balance for the Reject in parallel - // rather than waiting for the database to update before relaying - // the packet back. In this case, the only substantive difference - // would come from if the DB operation fails or takes too long. - // The packet is already rejected so it's more useful for the sender - // to get the error message from the original Reject packet rather - // than a less specific one saying that this node had an "internal - // error" caused by a database issue. - let reject_balance_update = store_clone.update_balances_for_reject( - from_clone.clone(), - incoming_amount, - ).map_err(move |_| error!("Error rolling back balance change for accounts: {} and {}. 
Incoming amount was: {}, outgoing amount was: {}", from_clone.id(), to_clone.id(), incoming_amount, outgoing_amount)); - spawn(reject_balance_update); + Ok(fulfill) + } + Err(reject) => { + // Similar to the logic for handling the Fulfill packet above, we + // spawn a task to update the balance for the Reject in parallel + // rather than waiting for the database to update before relaying + // the packet back. In this case, the only substantive difference + // would come from if the DB operation fails or takes too long. + // The packet is already rejected so it's more useful for the sender + // to get the error message from the original Reject packet rather + // than a less specific one saying that this node had an "internal + // error" caused by a database issue. + tokio::spawn({ + let store_clone = self.store.clone(); + async move { + store_clone.update_balances_for_reject( + from_clone.clone(), + incoming_amount, + ).map_err(move |_| error!("Error rolling back balance change for accounts: {} and {}. Incoming amount was: {}, outgoing amount was: {}", from_clone.id(), to_clone.id(), incoming_amount, outgoing_amount)).await + } + }); - Err(reject) - }) - }), - ) + Err(reject) + } + } } } diff --git a/crates/interledger-service-util/src/echo_service.rs b/crates/interledger-service-util/src/echo_service.rs index 1cd227f17..6bd7e1941 100644 --- a/crates/interledger-service-util/src/echo_service.rs +++ b/crates/interledger-service-util/src/echo_service.rs @@ -1,7 +1,7 @@ +use async_trait::async_trait; use byteorder::ReadBytesExt; use bytes::{BufMut, BytesMut}; use core::borrow::Borrow; -use futures::future::err; use interledger_packet::{ oer::BufOerExt, Address, ErrorCode, Prepare, PrepareBuilder, RejectBuilder, }; @@ -12,11 +12,6 @@ use std::marker::PhantomData; use std::str; use std::time::SystemTime; -/// A service that responds to the Echo Protocol. -/// Currently, this service only supports bidirectional mode (unidirectional mode is not supported yet). -/// The service doesn't shorten expiry as it expects the expiry to be shortened by another service -/// like `ExpiryShortenerService`. - /// The prefix that echo packets should have in its data section const ECHO_PREFIX: &str = "ECHOECHOECHOECHO"; /// The length of the `ECHO_PREFIX` @@ -27,6 +22,10 @@ enum EchoPacketType { Response = 1, } +/// A service that implements the Echo Protocol. +/// Currently, this service only supports bidirectional mode (unidirectional mode is not supported yet). +/// The service doesn't shorten expiry as it expects the expiry to be shortened by another service +/// like `ExpiryShortenerService`. 
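
The recurring `#[async_trait]` change (seen here on the reject branch and on `AddressStore`/`IncomingService` below) replaces boxed-future returning methods with `async fn`s; in isolation, the before/after looks like this toy trait, which is not the real interledger-service definition:

use async_trait::async_trait;

// Before: fn get(&self) -> Box<dyn Future<Item = u64, Error = ()> + Send>;
// After: the macro boxes and pins the future behind the scenes.
#[async_trait]
trait DemoStore {
    async fn get(&self) -> Result<u64, ()>;
}

struct InMemory(u64);

#[async_trait]
impl DemoStore for InMemory {
    async fn get(&self) -> Result<u64, ()> {
        Ok(self.0)
    }
}

#[tokio::main]
async fn main() {
    let store = InMemory(42);
    assert_eq!(store.get().await, Ok(42));
}
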
#[derive(Clone)] pub struct EchoService { store: S, @@ -40,6 +39,7 @@ where I: IncomingService, A: Account, { + /// Simple Constructor pub fn new(store: S, next: I) -> Self { EchoService { store, @@ -49,20 +49,19 @@ where } } +#[async_trait] impl IncomingService for EchoService where - I: IncomingService, - S: AddressStore, - A: Account, + I: IncomingService + Send, + S: AddressStore + Send, + A: Account + Send, { - type Future = BoxedIlpFuture; - - fn handle_request(&mut self, mut request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, mut request: IncomingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let should_echo = request.prepare.destination() == ilp_address && request.prepare.data().starts_with(ECHO_PREFIX.as_bytes()); if !should_echo { - return Box::new(self.next.handle_request(request)); + return self.next.handle_request(request).await; } debug!("Responding to Echo protocol request: {:?}", request); @@ -78,23 +77,23 @@ where Ok(value) => value, Err(error) => { eprintln!("Could not read packet type: {:?}", error); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: b"Could not read echo packet type.", triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } }; if echo_packet_type == EchoPacketType::Response as u8 { // if the echo packet type is Response, just pass it to the next service // so that the initiator could handle this packet - return Box::new(self.next.handle_request(request)); + return self.next.handle_request(request).await; } if echo_packet_type != EchoPacketType::Request as u8 { eprintln!("The packet type is not acceptable: {}", echo_packet_type); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: format!( "The echo packet type: {} is not acceptable.", @@ -104,7 +103,7 @@ where triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } // check source address @@ -116,24 +115,24 @@ where "Could not parse source address from echo packet: {:?}", error ); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: b"Could not parse source address from Echo packet", triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } }, Err(error) => { eprintln!("Could not read source address: {:?}", error); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F01_INVALID_PACKET, message: b"Could not read source address.", triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } }; @@ -150,7 +149,7 @@ where } .build(); - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } } @@ -171,8 +170,10 @@ pub struct EchoRequestBuilder<'a> { #[cfg(test)] impl<'a> EchoRequestBuilder<'a> { pub fn build(&self) -> Prepare { + use bytes04::BufMut as BufMut04; let source_address_len = oer::predict_var_octet_string(self.source_address.len()); - let mut data_buffer = BytesMut::with_capacity(ECHO_PREFIX_LEN + 1 + source_address_len); + let mut data_buffer = + bytes04::BytesMut::with_capacity(ECHO_PREFIX_LEN + 1 + source_address_len); data_buffer.put(ECHO_PREFIX.as_bytes()); data_buffer.put_u8(EchoPacketType::Request as u8); data_buffer.put_var_octet_string(self.source_address.as_ref() as &[u8]); @@ -214,7 +215,6 @@ impl<'a> EchoResponseBuilder<'a> { #[cfg(test)] mod echo_tests { use super::*; - use futures::future::Future; use 
interledger_packet::{FulfillBuilder, PrepareBuilder}; use interledger_service::incoming_service_fn; use lazy_static::lazy_static; @@ -232,16 +232,14 @@ mod echo_tests { #[derive(Clone)] struct TestStore(Address); + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -279,8 +277,8 @@ mod echo_tests { /// If the destination of the packet is not destined to the node's address, /// the node should not echo the packet. - #[test] - fn test_echo_packet_not_destined() { + #[tokio::test] + async fn test_echo_packet_not_destined() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -319,14 +317,14 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_ok()); } /// Even if the destination of the packet is the node's address, /// packets that don't have a correct echo prefix will not be handled as echo packets. - #[test] - fn test_echo_packet_without_echo_prefix() { + #[tokio::test] + async fn test_echo_packet_without_echo_prefix() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -365,14 +363,14 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_ok()); } /// If the destination of the packet is the node's address and the echo packet type is /// request, the service will echo the packet modifying destination to the `source_address`. - #[test] - fn test_echo_packet() { + #[tokio::test] + async fn test_echo_packet() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -411,13 +409,13 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_ok()); } /// If echo packet type is neither `1` nor `2`, the packet is considered to be malformed. - #[test] - fn test_invalid_echo_packet_type() { + #[tokio::test] + async fn test_invalid_echo_packet_type() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -452,14 +450,14 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_err()); } /// Even if the destination of the packet is the node's address and the data starts with /// echo prefix correctly, `source_address` may be broken. This is the case. 
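
The test conversions in this file all follow the same recipe: drop `futures::Future::wait()`, mark the test `#[tokio::test]`, and `.await` the call. Schematically, with a toy async function rather than the echo service:

#[cfg(test)]
mod demo_async_tests {
    async fn double(x: u32) -> Result<u32, ()> {
        Ok(x * 2)
    }

    // Old: #[test] fn doubles_values() { assert!(double(2).wait().is_ok()); }
    #[tokio::test]
    async fn doubles_values() {
        let result = double(2).await;
        assert_eq!(result, Ok(4));
    }
}
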
- #[test] - fn test_invalid_source_address() { + #[tokio::test] + async fn test_invalid_source_address() { let amount = 1; let expires_at = SystemTime::now() + Duration::from_secs(30); let fulfillment = &get_random_fulfillment(); @@ -494,7 +492,7 @@ mod echo_tests { // test let result = echo_service .handle_request(IncomingRequest { prepare, from }) - .wait(); + .await; assert!(result.is_err()); } diff --git a/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs b/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs index 1dd0bf3dd..7044c7114 100644 --- a/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs +++ b/crates/interledger-service-util/src/exchange_rate_providers/coincap.rs @@ -1,7 +1,7 @@ -use futures::Future; +use futures::TryFutureExt; use lazy_static::lazy_static; use log::{error, warn}; -use reqwest::{r#async::Client, Url}; +use reqwest::{Client, Url}; use serde::Deserialize; use std::{collections::HashMap, str::FromStr}; @@ -25,50 +25,50 @@ struct RateResponse { data: Vec, } -pub fn query_coincap(client: &Client) -> impl Future, Error = ()> { - query_coincap_endpoint(client, COINCAP_ASSETS_URL.clone()) - .join(query_coincap_endpoint(client, COINCAP_RATES_URL.clone())) - .and_then(|(assets, rates)| { - let all_rates: HashMap = assets - .data - .into_iter() - .chain(rates.data.into_iter()) - .filter_map(|record| match f64::from_str(record.rate_usd.as_str()) { - Ok(rate) => Some((record.symbol.to_uppercase(), rate)), - Err(err) => { - warn!( - "Unable to parse {} rate as an f64: {} {:?}", - record.symbol, record.rate_usd, err - ); - None - } - }) - .collect(); - Ok(all_rates) +pub async fn query_coincap(client: &Client) -> Result, ()> { + let (assets, rates) = futures::future::join( + query_coincap_endpoint(client, COINCAP_ASSETS_URL.clone()), + query_coincap_endpoint(client, COINCAP_RATES_URL.clone()), + ) + .await; + + let all_rates: HashMap = assets? 
+ .data + .into_iter() + .chain(rates?.data.into_iter()) + .filter_map(|record| match f64::from_str(record.rate_usd.as_str()) { + Ok(rate) => Some((record.symbol.to_uppercase(), rate)), + Err(err) => { + warn!( + "Unable to parse {} rate as an f64: {} {:?}", + record.symbol, record.rate_usd, err + ); + None + } }) + .collect(); + Ok(all_rates) } -fn query_coincap_endpoint( - client: &Client, - url: Url, -) -> impl Future { - client +async fn query_coincap_endpoint(client: &Client, url: Url) -> Result { + let res = client .get(url) .send() .map_err(|err| { error!("Error fetching exchange rates from CoinCap: {:?}", err); }) - .and_then(|res| { - res.error_for_status().map_err(|err| { - error!("HTTP error getting exchange rates from CoinCap: {:?}", err); - }) - }) - .and_then(|mut res| { - res.json().map_err(|err| { - error!( - "Error getting exchange rate response body from CoinCap, incorrect type: {:?}", - err - ); - }) + .await?; + + let res = res.error_for_status().map_err(|err| { + error!("HTTP error getting exchange rates from CoinCap: {:?}", err); + })?; + + res.json() + .map_err(|err| { + error!( + "Error getting exchange rate response body from CoinCap, incorrect type: {:?}", + err + ); }) + .await } diff --git a/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs b/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs index 2a418b3af..3f369da83 100644 --- a/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs +++ b/crates/interledger-service-util/src/exchange_rate_providers/cryptocompare.rs @@ -1,7 +1,7 @@ -use futures::Future; +use futures::TryFutureExt; use lazy_static::lazy_static; use log::error; -use reqwest::{r#async::Client, Url}; +use reqwest::{Client, Url}; use secrecy::{ExposeSecret, SecretString}; use serde::Deserialize; use std::{ @@ -47,44 +47,52 @@ struct Response { data: Vec, } -pub fn query_cryptocompare( +pub async fn query_cryptocompare( client: &Client, api_key: &SecretString, -) -> impl Future, Error = ()> { - client +) -> Result, ()> { + // ref: https://github.com/rust-lang/rust/pull/64856 + let header = format!("Apikey {}", api_key.expose_secret()); + let res = client .get(CRYPTOCOMPARE_URL.clone()) // TODO don't copy the api key on every request - .header( - "Authorization", - format!("Apikey {}", api_key.expose_secret()).as_str(), - ) + .header("Authorization", header) .send() .map_err(|err| { - error!("Error fetching exchange rates from CryptoCompare: {:?}", err); + error!( + "Error fetching exchange rates from CryptoCompare: {:?}", + err + ); }) - .and_then(|res| { - res.error_for_status().map_err(|err| { - error!("HTTP error getting exchange rates from CryptoCompare: {:?}", err); - }) - }) - .and_then(|mut res| { - res.json().map_err(|err| { - error!( - "Error getting exchange rate response body from CryptoCompare, incorrect type: {:?}", - err - ); - }) + .await?; + + let res = res.error_for_status().map_err(|err| { + error!( + "HTTP error getting exchange rates from CryptoCompare: {:?}", + err + ); + })?; + + let res: Response = res + .json() + .map_err(|err| { + error!( + "Error getting exchange rate response body from CryptoCompare, incorrect type: {:?}", + err + ); }) - .and_then(|res: Response| { - let rates = res - .data - .into_iter() - .filter_map(|asset| if let Some(raw) = asset.raw { - Some((asset.coin_info.name.to_uppercase(), raw.usd.price)) - } else { - None - }) - .chain(once(("USD".to_string(), 1.0))); - Ok(HashMap::from_iter(rates)) + .await?; + + let rates = res + 
.data + .into_iter() + .filter_map(|asset| { + if let Some(raw) = asset.raw { + Some((asset.coin_info.name.to_uppercase(), raw.usd.price)) + } else { + None + } }) + .chain(once(("USD".to_string(), 1.0))); + Ok(HashMap::from_iter(rates)) } diff --git a/crates/interledger-service-util/src/exchange_rate_providers/mod.rs b/crates/interledger-service-util/src/exchange_rate_providers/mod.rs index 4b005cf04..01f84c124 100644 --- a/crates/interledger-service-util/src/exchange_rate_providers/mod.rs +++ b/crates/interledger-service-util/src/exchange_rate_providers/mod.rs @@ -1,4 +1,6 @@ +/// Exchange rate provider for [CoinCap](https://coincap.io/) mod coincap; +/// Exchange rate provider for [CryptoCompare](https://www.cryptocompare.com/). REQUIRES [API KEY](https://min-api.cryptocompare.com/). mod cryptocompare; pub use coincap::*; diff --git a/crates/interledger-service-util/src/exchange_rates_service.rs b/crates/interledger-service-util/src/exchange_rates_service.rs index 4e5bb83a3..f6ab7174b 100644 --- a/crates/interledger-service-util/src/exchange_rates_service.rs +++ b/crates/interledger-service-util/src/exchange_rates_service.rs @@ -1,13 +1,11 @@ use super::exchange_rate_providers::*; -use futures::{ - future::{err, Either}, - Future, Stream, -}; -use interledger_packet::{ErrorCode, Fulfill, Reject, RejectBuilder}; +use async_trait::async_trait; +use futures::TryFutureExt; +use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use interledger_settlement::core::types::{Convert, ConvertDetails}; use log::{debug, error, trace, warn}; -use reqwest::r#async::Client; +use reqwest::Client; use secrecy::SecretString; use serde::Deserialize; use std::{ @@ -17,17 +15,19 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Duration, }; -use tokio::{executor::spawn, timer::Interval}; // TODO should this whole file be moved to its own crate? +/// Store trait responsible for managing the exchange rates of multiple currencies pub trait ExchangeRateStore: Clone { // TODO we may want to make this async if/when we use pubsub to broadcast // rate changes to different instances of a horizontally-scalable node + /// Sets the exchange rate by providing an AssetCode->USD price mapping fn set_exchange_rates(&self, rates: HashMap) -> Result<(), ()>; + /// Gets the exchange rates for the provided asset codes fn get_exchange_rates(&self, asset_codes: &[&str]) -> Result, ()>; // TODO should this be on the API instead? That's where it's actually used @@ -36,6 +36,7 @@ pub trait ExchangeRateStore: Clone { // (so that we don't accidentally lock up the RwLock on the store's exchange_rates) // but in the normal case of getting the rate between two assets, we don't want to // copy all the rate data + /// Gets the exchange rates for all stored asset codes fn get_all_exchange_rates(&self) -> Result, ()>; } @@ -67,25 +68,21 @@ where } } +#[async_trait] impl OutgoingService for ExchangeRateService where // TODO can we make these non-'static? S: AddressStore + ExchangeRateStore + Clone + Send + Sync + 'static, - O: OutgoingService + Send + Clone + 'static, - A: Account + Sync + 'static, + O: OutgoingService + Send + Sync + Clone + 'static, + A: Account + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On send request: /// 1. If the prepare packet's amount is 0, it just forwards /// 1. Retrieves the exchange rate from the store (the store independently is responsible for polling the rates) /// - return reject if the call to the store fails /// 1. 
Calculates the exchange rate AND scales it up/down depending on how many decimals each asset requires /// 1. Updates the amount in the prepare packet and forwards it - fn send_request( - &mut self, - mut request: OutgoingRequest, - ) -> Box + Send> { + async fn send_request(&mut self, mut request: OutgoingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); if request.prepare.amount() > 0 { let rate: f64 = if request.from.asset_code() == request.to.asset_code() { @@ -105,7 +102,7 @@ where request.from.asset_code(), request.to.asset_code() ); - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: format!( "No exchange rate available from asset: {} to: {}", @@ -116,7 +113,7 @@ where triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); }; // Apply spread @@ -165,13 +162,13 @@ where ) }; - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code, message: message.as_bytes(), triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } request.prepare.set_amount(outgoing_amount as u64); trace!("Converted incoming amount of: {} {} (scale {}) from account {} to outgoing amount of: {} {} (scale {}) for account {}", @@ -183,7 +180,7 @@ where // returns an error. Happens due to float // multiplication overflow . // (float overflow in Rust produces +inf) - return Box::new(err(RejectBuilder { + return Err(RejectBuilder { code: ErrorCode::F08_AMOUNT_TOO_LARGE, message: format!( "Could not convert exchange rate from {}:{} to: {}:{}. Got incoming amount: {}", @@ -197,12 +194,12 @@ where triggered_by: Some(&ilp_address), data: &[], } - .build())); + .build()); } } } - Box::new(self.next.send_request(request)) + self.next.send_request(request).await } } @@ -230,6 +227,7 @@ pub enum ExchangeRateProvider { } /// Poll exchange rate providers for the current exchange rates +#[derive(Clone)] pub struct ExchangeRateFetcher { provider: ExchangeRateProvider, consecutive_failed_polls: Arc, @@ -242,6 +240,7 @@ impl ExchangeRateFetcher where S: ExchangeRateStore + Send + Sync + 'static, { + /// Simple constructor pub fn new( provider: ExchangeRateProvider, failed_polls_before_invalidation: u32, @@ -256,47 +255,42 @@ where } } - pub fn fetch_on_interval(self, interval: Duration) -> impl Future { + /// Spawns a future which calls [`self.update_rates()`](./struct.ExchangeRateFetcher.html#method.update_rates) every `interval` + pub fn spawn_interval(self, interval: Duration) { debug!( "Starting interval to poll exchange rate provider: {:?} for rates", self.provider ); - Interval::new(Instant::now(), interval) - .map_err(|err| { - error!( - "Interval error, no longer fetching exchange rates: {:?}", - err - ); - }) - .for_each(move |_| { - self.update_rates().then(|_| { - // Ignore errors so that they don't cause the Interval to stop - Ok(()) - }) - }) - } - - pub fn spawn_interval(self, interval: Duration) { - spawn(self.fetch_on_interval(interval)); + let interval = async move { + let mut interval = tokio::time::interval(interval); + loop { + interval.tick().await; + // Ignore errors so that they don't cause the Interval to stop + let _ = self.update_rates().await; + } + }; + tokio::spawn(interval); } - fn fetch_rates(&self) -> impl Future, Error = ()> { + /// Calls the proper exchange rate provider + async fn fetch_rates(&self) -> Result, ()> { match self.provider { ExchangeRateProvider::CryptoCompare(ref api_key) => { - Either::A(query_cryptocompare(&self.client, api_key)) + 
query_cryptocompare(&self.client, api_key).await } - ExchangeRateProvider::CoinCap => Either::B(query_coincap(&self.client)), + ExchangeRateProvider::CoinCap => query_coincap(&self.client).await, } } - fn update_rates(&self) -> impl Future { + /// Gets the exchange rates and proceeds to update the store with the newly polled values + async fn update_rates(&self) -> Result<(), ()> { let consecutive_failed_polls = self.consecutive_failed_polls.clone(); let consecutive_failed_polls_zeroer = consecutive_failed_polls.clone(); let failed_polls_before_invalidation = self.failed_polls_before_invalidation; let store = self.store.clone(); let store_clone = self.store.clone(); let provider = self.provider.clone(); - self.fetch_rates() + let mut rates = self.fetch_rates() .map_err(move |_| { // Note that a race between the read on this line and the check on the line after // is quite unlikely as long as the interval between polls is reasonable. @@ -311,29 +305,27 @@ where panic!("Failed to clear exchange rates cache after exchange rates server became unresponsive"); } } - }) - .and_then(move |mut rates| { - trace!("Fetched exchange rates: {:?}", rates); - let num_rates = rates.len(); - rates.insert("USD".to_string(), 1.0); - if store_clone.set_exchange_rates(rates).is_ok() { - // Reset our invalidation counter - consecutive_failed_polls_zeroer.store(0, Ordering::Relaxed); - debug!("Updated {} exchange rates from {:?}", num_rates, provider); - Ok(()) - } else { - error!("Error setting exchange rates in store"); - Err(()) - } - }) + }).await?; + + trace!("Fetched exchange rates: {:?}", rates); + let num_rates = rates.len(); + rates.insert("USD".to_string(), 1.0); + if store_clone.set_exchange_rates(rates).is_ok() { + // Reset our invalidation counter + consecutive_failed_polls_zeroer.store(0, Ordering::Relaxed); + debug!("Updated {} exchange rates from {:?}", num_rates, provider); + Ok(()) + } else { + error!("Error setting exchange rates in store"); + Err(()) + } } } #[cfg(test)] mod tests { use super::*; - use futures::{future::ok, Future}; - use interledger_packet::{Address, FulfillBuilder, PrepareBuilder}; + use interledger_packet::{Address, Fulfill, FulfillBuilder, PrepareBuilder, Reject}; use interledger_service::{outgoing_service_fn, Account}; use lazy_static::lazy_static; use std::collections::HashMap; @@ -348,21 +340,21 @@ mod tests { pub static ref ALICE: Username = Username::from_str("alice").unwrap(); } - #[test] - fn exchange_rate_ok() { + #[tokio::test] + async fn exchange_rate_ok() { // if `to` is worth $2, and `from` is worth 1, then they receive half // the amount of units - let ret = exchange_rate(200, 1, 1.0, 1, 2.0, 0.0); + let ret = exchange_rate(200, 1, 1.0, 1, 2.0, 0.0).await; assert_eq!(ret.1[0].prepare.amount(), 100); - let ret = exchange_rate(1_000_000, 1, 3.0, 1, 2.0, 0.0); + let ret = exchange_rate(1_000_000, 1, 3.0, 1, 2.0, 0.0).await; assert_eq!(ret.1[0].prepare.amount(), 1_500_000); } - #[test] - fn exchange_conversion_error() { + #[tokio::test] + async fn exchange_conversion_error() { // rejects f64 that does not fit in u64 - let ret = exchange_rate(std::u64::MAX, 1, 2.0, 1, 1.0, 0.0); + let ret = exchange_rate(std::u64::MAX, 1, 2.0, 1, 1.0, 0.0).await; let reject = ret.0.unwrap_err(); assert_eq!(reject.code(), ErrorCode::F08_AMOUNT_TOO_LARGE); assert!(reject @@ -370,7 +362,7 @@ mod tests { .starts_with(b"Could not cast to f64, amount too large")); // rejects f64 which gets rounded down to 0 - let ret = exchange_rate(1, 2, 1.0, 1, 1.0, 0.0); + let ret = exchange_rate(1, 
2, 1.0, 1, 1.0, 0.0).await; let reject = ret.0.unwrap_err(); assert_eq!(reject.code(), ErrorCode::R01_INSUFFICIENT_SOURCE_AMOUNT); assert!(reject @@ -378,38 +370,38 @@ mod tests { .starts_with(b"Could not cast to f64, amount too small")); // `Convert` errored - let ret = exchange_rate(std::u64::MAX, 1, std::f64::MAX, 255, 1.0, 0.0); + let ret = exchange_rate(std::u64::MAX, 1, std::f64::MAX, 255, 1.0, 0.0).await; let reject = ret.0.unwrap_err(); assert_eq!(reject.code(), ErrorCode::F08_AMOUNT_TOO_LARGE); assert!(reject.message().starts_with(b"Could not convert")); } - #[test] - fn applies_spread() { - let ret = exchange_rate(100, 1, 1.0, 1, 2.0, 0.01); + #[tokio::test] + async fn applies_spread() { + let ret = exchange_rate(100, 1, 1.0, 1, 2.0, 0.01).await; assert_eq!(ret.1[0].prepare.amount(), 49); // Negative spread is unusual but possible - let ret = exchange_rate(200, 1, 1.0, 1, 2.0, -0.01); + let ret = exchange_rate(200, 1, 1.0, 1, 2.0, -0.01).await; assert_eq!(ret.1[0].prepare.amount(), 101); // Rounds down - let ret = exchange_rate(4, 1, 1.0, 1, 2.0, 0.01); + let ret = exchange_rate(4, 1, 1.0, 1, 2.0, 0.01).await; // this would've been 2, but it becomes 1.99 and gets rounded down to 1 assert_eq!(ret.1[0].prepare.amount(), 1); // Spread >= 1 means the node takes everything - let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 1.0); + let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 1.0).await; assert_eq!(ret.1[0].prepare.amount(), 0); // Need to catch when spread > 1 - let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 2.0); + let ret = exchange_rate(10_000_000_000, 1, 1.0, 1, 2.0, 2.0).await; assert_eq!(ret.1[0].prepare.amount(), 0); } // Instantiates an exchange rate service and returns the fulfill/reject // packet and the outgoing request after performing an asset conversion - fn exchange_rate( + async fn exchange_rate( amount: u64, scale1: u8, rate1: f64, @@ -421,11 +413,11 @@ mod tests { let requests_clone = requests.clone(); let outgoing = outgoing_service_fn(move |request| { requests_clone.lock().unwrap().push(request); - Box::new(ok(FulfillBuilder { + Ok(FulfillBuilder { fulfillment: &[0; 32], data: b"hello!", } - .build())) + .build()) }); let mut service = test_service(rate1, rate2, spread, outgoing); let result = service @@ -442,7 +434,7 @@ mod tests { } .build(), }) - .wait(); + .await; let reqs = requests.lock().unwrap(); (result, reqs.clone()) @@ -464,16 +456,14 @@ mod tests { } } + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } diff --git a/crates/interledger-service-util/src/expiry_shortener_service.rs b/crates/interledger-service-util/src/expiry_shortener_service.rs index db5658447..22f25eed9 100644 --- a/crates/interledger-service-util/src/expiry_shortener_service.rs +++ b/crates/interledger-service-util/src/expiry_shortener_service.rs @@ -1,11 +1,15 @@ +use async_trait::async_trait; use chrono::{DateTime, Duration, Utc}; -use interledger_service::{Account, OutgoingRequest, OutgoingService}; +use interledger_service::{Account, IlpResult, OutgoingRequest, OutgoingService}; use log::trace; pub const DEFAULT_ROUND_TRIP_TIME: u32 = 500; pub const DEFAULT_MAX_EXPIRY_DURATION: u32 = 30000; +/// An account 
with a round trip time, used by the [`ExpiryShortenerService`](./struct.ExpiryShortenerService.html) +/// to shorten a packet's expiration time to account for latency pub trait RoundTripTimeAccount: Account { + /// The account's round trip time fn round_trip_time(&self) -> u32 { DEFAULT_ROUND_TRIP_TIME } @@ -33,25 +37,26 @@ impl ExpiryShortenerService { } } + // TODO: This isn't used anywhere, should we remove it? + /// Sets the service's max expiry duration pub fn max_expiry_duration(&mut self, milliseconds: u32) -> &mut Self { self.max_expiry_duration = milliseconds; self } } +#[async_trait] impl OutgoingService for ExpiryShortenerService where - O: OutgoingService, - A: RoundTripTimeAccount, + O: OutgoingService + Send + Sync + 'static, + A: RoundTripTimeAccount + Send + Sync + 'static, { - type Future = O::Future; - /// On send request: /// 1. Get the sender and receiver's roundtrip time (default 1000ms) /// 2. Reduce the packet's expiry by that amount /// 3. Ensure that the packet expiry does not exceed the maximum expiry duration /// 4. Forward the request - fn send_request(&mut self, mut request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, mut request: OutgoingRequest) -> IlpResult { let time_to_subtract = i64::from(request.from.round_trip_time() + request.to.round_trip_time()); let new_expiry = DateTime::::from(request.prepare.expires_at()) @@ -70,14 +75,13 @@ where }; request.prepare.set_expires_at(new_expiry.into()); - self.next.send_request(request) + self.next.send_request(request).await } } #[cfg(test)] mod tests { use super::*; - use futures::Future; use interledger_packet::{Address, ErrorCode, FulfillBuilder, PrepareBuilder, RejectBuilder}; use interledger_service::{outgoing_service_fn, Username}; use std::str::FromStr; @@ -121,8 +125,8 @@ mod tests { } } - #[test] - fn shortens_expiry_by_round_trip_time() { + #[tokio::test] + async fn shortens_expiry_by_round_trip_time() { let original_expiry = Utc::now() + Duration::milliseconds(30000); let mut service = ExpiryShortenerService::new(outgoing_service_fn(move |request| { if DateTime::::from(request.prepare.expires_at()) @@ -157,12 +161,12 @@ mod tests { .build(), original_amount: 10, }) - .wait() + .await .expect("Should have shortened expiry"); } - #[test] - fn reduces_expiry_to_max_duration() { + #[tokio::test] + async fn reduces_expiry_to_max_duration() { let mut service = ExpiryShortenerService::new(outgoing_service_fn(move |request| { if DateTime::::from(request.prepare.expires_at()) - Utc::now() <= Duration::milliseconds(30000) @@ -196,7 +200,7 @@ mod tests { .build(), original_amount: 10, }) - .wait() + .await .expect("Should have shortened expiry"); } } diff --git a/crates/interledger-service-util/src/lib.rs b/crates/interledger-service-util/src/lib.rs index 40c2e1eef..6f897a19f 100644 --- a/crates/interledger-service-util/src/lib.rs +++ b/crates/interledger-service-util/src/lib.rs @@ -2,13 +2,23 @@ //! //! Miscellaneous, small Interledger Services. 
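
For reference, the four steps documented on `ExpiryShortenerService::send_request` above reduce to a small piece of date arithmetic. A minimal sketch with illustrative parameter names (the real logic lives inside `send_request` and reads the round trip times from the request's `from` and `to` accounts):

    use chrono::{DateTime, Duration, Utc};

    fn shorten_expiry(
        expires_at: DateTime<Utc>,
        from_round_trip_ms: u32,
        to_round_trip_ms: u32,
        max_expiry_duration_ms: u32,
    ) -> DateTime<Utc> {
        // 1-2. subtract both parties' round trip times from the packet's expiry
        let shortened =
            expires_at - Duration::milliseconds(i64::from(from_round_trip_ms + to_round_trip_ms));
        // 3. never let the expiry exceed `now + max_expiry_duration`
        let latest_allowed = Utc::now() + Duration::milliseconds(i64::from(max_expiry_duration_ms));
        std::cmp::min(shortened, latest_allowed)
    }
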
+/// Balance tracking service mod balance_service; +/// Service which implements the echo protocol mod echo_service; +/// Utilities for connecting to various exchange rate providers mod exchange_rate_providers; +/// Service responsible for setting and fetching dollar denominated exchange rates mod exchange_rates_service; +/// Service responsible for shortening the expiry time of packets, +/// to take into account for network latency mod expiry_shortener_service; +/// Service responsible for capping the amount an account can send in a packet mod max_packet_amount_service; +/// Service responsible for capping the amount of packets and amount in packets an account can send mod rate_limit_service; +/// Service responsible for checking that packets are not expired and that prepare packets' fulfillment conditions +/// match the fulfillment inside the incoming fulfills mod validator_service; pub use self::balance_service::{BalanceService, BalanceStore}; diff --git a/crates/interledger-service-util/src/max_packet_amount_service.rs b/crates/interledger-service-util/src/max_packet_amount_service.rs index 3dc0aeac0..a60658cbb 100644 --- a/crates/interledger-service-util/src/max_packet_amount_service.rs +++ b/crates/interledger-service-util/src/max_packet_amount_service.rs @@ -1,8 +1,10 @@ -use futures::future::err; +use async_trait::async_trait; use interledger_packet::{ErrorCode, MaxPacketAmountDetails, RejectBuilder}; use interledger_service::*; use log::debug; +/// Extension trait for [`Account`](../interledger_service/trait.Account.html) with the max packet amount +/// allowed for this account pub trait MaxPacketAmountAccount: Account { fn max_packet_amount(&self) -> u64; } @@ -22,26 +24,26 @@ pub struct MaxPacketAmountService { } impl MaxPacketAmountService { + /// Simple constructor pub fn new(store: S, next: I) -> Self { MaxPacketAmountService { store, next } } } +#[async_trait] impl IncomingService for MaxPacketAmountService where - I: IncomingService, - S: AddressStore, - A: MaxPacketAmountAccount, + I: IncomingService + Send + Sync + 'static, + S: AddressStore + Send + Sync + 'static, + A: MaxPacketAmountAccount + Send + Sync + 'static, { - type Future = BoxedIlpFuture; - /// On receive request: /// 1. 
if request.prepare.amount <= request.from.max_packet_amount forward the request, else error - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let max_packet_amount = request.from.max_packet_amount(); if request.prepare.amount() <= max_packet_amount { - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } else { debug!( "Prepare amount:{} exceeds max_packet_amount: {}", @@ -50,13 +52,13 @@ where ); let details = MaxPacketAmountDetails::new(request.prepare.amount(), max_packet_amount).to_bytes(); - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F08_AMOUNT_TOO_LARGE, message: &[], triggered_by: Some(&ilp_address), data: &details[..], } - .build())) + .build()) } } } diff --git a/crates/interledger-service-util/src/rate_limit_service.rs b/crates/interledger-service-util/src/rate_limit_service.rs index 28b5627f1..a7d5efaed 100644 --- a/crates/interledger-service-util/src/rate_limit_service.rs +++ b/crates/interledger-service-util/src/rate_limit_service.rs @@ -1,44 +1,56 @@ -use futures::{ - future::{err, Either}, - Future, -}; +use async_trait::async_trait; use interledger_packet::{ErrorCode, RejectBuilder}; -use interledger_service::{ - Account, AddressStore, BoxedIlpFuture, IncomingRequest, IncomingService, -}; +use interledger_service::{Account, AddressStore, IlpResult, IncomingRequest, IncomingService}; use log::{error, warn}; use std::marker::PhantomData; +/// Extension trait for [`Account`](../interledger_service/trait.Account.html) with rate limiting related information pub trait RateLimitAccount: Account { + /// The maximum packets per minute allowed for this account fn packets_per_minute_limit(&self) -> Option { None } + /// The maximum units per minute allowed for this account fn amount_per_minute_limit(&self) -> Option { None } } +/// Rate limiting related errors #[derive(Clone, Debug, PartialEq, Eq)] pub enum RateLimitError { + /// Account exceeded their packet limit PacketLimitExceeded, + /// Account exceeded their amount limit ThroughputLimitExceeded, + /// There was an internal error when trying to connect to the store StoreError, } +/// Store trait which manages the rate limit related information of accounts +#[async_trait] pub trait RateLimitStore { + /// The provided account must implement [`RateLimitAccount`](./trait.RateLimitAccount.html) type Account: RateLimitAccount; - fn apply_rate_limits( + /// Apply rate limits based on the packets per minute and amount of per minute + /// limits set on the provided account + async fn apply_rate_limits( &self, account: Self::Account, prepare_amount: u64, - ) -> Box + Send>; - fn refund_throughput_limit( + ) -> Result<(), RateLimitError>; + + /// Refunds the throughput limit which was charged to an account + /// Called if the node receives a reject packet after trying to forward + /// a packet to a peer, meaning that effectively reject packets do not + /// count towards a node's throughput limits + async fn refund_throughput_limit( &self, account: Self::Account, prepare_amount: u64, - ) -> Box + Send>; + ) -> Result<(), ()>; } /// # Rate Limit Service @@ -74,21 +86,20 @@ where } } +#[async_trait] impl IncomingService for RateLimitService where S: AddressStore + RateLimitStore + Clone + Send + Sync + 'static, I: IncomingService + Clone + Send + Sync + 'static, A: RateLimitAccount + Sync + 'static, { - type Future = BoxedIlpFuture; - 
/// On receiving a request: /// 1. Apply rate limit based on the sender of the request and the amount in the prepare packet in the request /// 1. If no limits were hit forward the request /// - If it succeeds, OK /// - If the request forwarding failed, the client should not be charged towards their throughput limit, so they are refunded, and return a reject /// 1. If the limit was hit, return a reject with the appropriate ErrorCode. - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let ilp_address = self.store.get_ilp_address(); let mut next = self.next.clone(); let store = self.store.clone(); @@ -98,43 +109,55 @@ where let has_throughput_limit = account.amount_per_minute_limit().is_some(); // request.from and request.amount are used for apply_rate_limits, can't the previous service // always set the account to have None for both? - Box::new(self.store.apply_rate_limits(request.from.clone(), request.prepare.amount()) - .map_err(move |err| { + match self + .store + .apply_rate_limits(request.from.clone(), request.prepare.amount()) + .await + { + Ok(_) => { + let packet = next.handle_request(request).await; + // If we did not get a fulfill, we should refund the sender + if packet.is_err() && has_throughput_limit { + let refunded = store + .refund_throughput_limit(account_clone, prepare_amount) + .await; + // if refunding failed, that's too bad, we will just return the reject + // from the peer + if let Err(err) = refunded { + error!("Error refunding throughput limit: {:?}", err); + } + } + + // return the packet + packet + } + Err(err) => { let code = match err { RateLimitError::PacketLimitExceeded => { if let Some(limit) = account.packets_per_minute_limit() { warn!("Account {} was rate limited for sending too many packets. Limit is: {} per minute", account.id(), limit); } ErrorCode::T05_RATE_LIMITED - }, + } RateLimitError::ThroughputLimitExceeded => { if let Some(limit) = account.amount_per_minute_limit() { warn!("Account {} was throughput limited for trying to send too much money. 
Limit is: {} per minute", account.id(), limit); } ErrorCode::T04_INSUFFICIENT_LIQUIDITY - }, + } RateLimitError::StoreError => ErrorCode::T00_INTERNAL_ERROR, }; - RejectBuilder { + + let reject = RejectBuilder { code, triggered_by: Some(&ilp_address), message: &[], data: &[], - }.build() - }) - .and_then(move |_| next.handle_request(request)) - .or_else(move |reject| { - if has_throughput_limit { - Either::A(store.refund_throughput_limit(account_clone, prepare_amount) - .then(|result| { - if let Err(err) = result { - error!("Error refunding throughput limit: {:?}", err); - } - Err(reject) - })) - } else { - Either::B(err(reject)) } - })) + .build(); + + Err(reject) + } + } } } diff --git a/crates/interledger-service-util/src/validator_service.rs b/crates/interledger-service-util/src/validator_service.rs index d8a04267a..38c5ef486 100644 --- a/crates/interledger-service-util/src/validator_service.rs +++ b/crates/interledger-service-util/src/validator_service.rs @@ -1,19 +1,18 @@ +use async_trait::async_trait; use chrono::{DateTime, Duration, Utc}; -use futures::{future::err, Future}; use hex; use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_service::*; use log::error; use ring::digest::{digest, SHA256}; use std::marker::PhantomData; -use tokio::prelude::FutureExt; +use tokio::time::timeout; /// # Validator Service /// /// Incoming or Outgoing Service responsible for rejecting timed out /// requests and checking that fulfillments received match the `execution_condition` from the original `Prepare` packets. /// Forwards everything else. -/// #[derive(Clone)] pub struct ValidatorService { store: S, @@ -27,6 +26,8 @@ where S: AddressStore, A: Account, { + /// Create an incoming validator service + /// Forwards incoming requests if not expired, else rejects pub fn incoming(store: S, next: I) -> Self { ValidatorService { store, @@ -42,6 +43,9 @@ where S: AddressStore, A: Account, { + /// Create an outgoing validator service + /// If outgoing request is not expired, it checks that the provided fulfillment is a valid preimage to the + /// prepare packet's fulfillment condition, and if so it forwards it, else rejects pub fn outgoing(store: S, next: O) -> Self { ValidatorService { store, @@ -51,21 +55,20 @@ where } } +#[async_trait] impl IncomingService for ValidatorService where - I: IncomingService, - S: AddressStore, - A: Account, + I: IncomingService + Send + Sync, + S: AddressStore + Send + Sync, + A: Account + Send + Sync, { - type Future = BoxedIlpFuture; - /// On receiving a request: /// 1. 
If the prepare packet in the request is not expired, forward it, otherwise return a reject - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { let expires_at = DateTime::::from(request.prepare.expires_at()); let now = Utc::now(); if expires_at >= now { - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } else { error!( "Incoming packet expired {}ms ago at {:?} (time now: {:?})", @@ -73,26 +76,24 @@ where expires_at.to_rfc3339(), expires_at.to_rfc3339(), ); - let result = Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::R00_TRANSFER_TIMED_OUT, message: &[], triggered_by: Some(&self.store.get_ilp_address()), data: &[], } - .build())); - Box::new(result) + .build()) } } } +#[async_trait] impl OutgoingService for ValidatorService where - O: OutgoingService, - S: AddressStore, - A: Account, + O: OutgoingService + Send + Sync, + S: AddressStore + Send + Sync, + A: Account + Send + Sync, { - type Future = BoxedIlpFuture; - /// On sending a request: /// 1. If the outgoing packet has expired, return a reject with the appropriate ErrorCode /// 1. Tries to forward the request @@ -101,7 +102,7 @@ where /// - If the forwarding is successful, it should receive a fulfill packet. Depending on if the hash of the fulfillment condition inside the fulfill is a preimage of the condition of the prepare: /// - return the fulfill if it matches /// - otherwise reject - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let mut condition: [u8; 32] = [0; 32]; condition[..].copy_from_slice(request.prepare.execution_condition()); // why? @@ -109,60 +110,61 @@ where let now = Utc::now(); let time_left = expires_at - now; let ilp_address = self.store.get_ilp_address(); - let ilp_address_clone = ilp_address.clone(); if time_left > Duration::zero() { - Box::new( - self.next - .send_request(request) - .timeout(time_left.to_std().expect("Time left must be positive")) - .map_err(move |err| { - // If the error was caused by the timer, into_inner will return None - if let Some(reject) = err.into_inner() { - reject - } else { - error!( - "Outgoing request timed out after {}ms (expiry was: {})", - time_left.num_milliseconds(), - expires_at, - ); - RejectBuilder { - code: ErrorCode::R00_TRANSFER_TIMED_OUT, - message: &[], - triggered_by: Some(&ilp_address_clone), - data: &[], - } - .build() - } - }) - .and_then(move |fulfill| { - let generated_condition = digest(&SHA256, fulfill.fulfillment()); - if generated_condition.as_ref() == condition { - Ok(fulfill) - } else { - error!("Fulfillment did not match condition. 
Fulfillment: {}, hash: {}, actual condition: {}", hex::encode(fulfill.fulfillment()), hex::encode(generated_condition), hex::encode(condition)); - Err(RejectBuilder { - code: ErrorCode::F09_INVALID_PEER_RESPONSE, - message: b"Fulfillment did not match condition", - triggered_by: Some(&ilp_address), - data: &[], - } - .build()) - } - }), + // Result of the future + let result = timeout( + time_left.to_std().expect("Time left must be positive"), + self.next.send_request(request), ) + .await; + + let fulfill = match result { + // If the future completed in time, it returns an IlpResult, + // which gives us the fulfill packet + Ok(packet) => packet?, + // If the future timed out, then it results in an error + Err(_) => { + error!( + "Outgoing request timed out after {}ms (expiry was: {})", + time_left.num_milliseconds(), + expires_at, + ); + return Err(RejectBuilder { + code: ErrorCode::R00_TRANSFER_TIMED_OUT, + message: &[], + triggered_by: Some(&ilp_address), + data: &[], + } + .build()); + } + }; + + let generated_condition = digest(&SHA256, fulfill.fulfillment()); + if generated_condition.as_ref() == condition { + Ok(fulfill) + } else { + error!("Fulfillment did not match condition. Fulfillment: {}, hash: {}, actual condition: {}", hex::encode(fulfill.fulfillment()), hex::encode(generated_condition), hex::encode(condition)); + Err(RejectBuilder { + code: ErrorCode::F09_INVALID_PEER_RESPONSE, + message: b"Fulfillment did not match condition", + triggered_by: Some(&ilp_address), + data: &[], + } + .build()) + } } else { error!( "Outgoing packet expired {}ms ago", (Duration::zero() - time_left).num_milliseconds(), ); // Already expired - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::R00_TRANSFER_TIMED_OUT, message: &[], triggered_by: Some(&ilp_address), data: &[], } - .build())) + .build()) } } } @@ -212,16 +214,14 @@ impl Account for TestAccount { struct TestStore; #[cfg(test)] +#[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -241,8 +241,8 @@ mod incoming { time::{Duration, SystemTime}, }; - #[test] - fn lets_through_valid_incoming_packet() { + #[tokio::test] + async fn lets_through_valid_incoming_packet() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::incoming( @@ -271,14 +271,14 @@ mod incoming { } .build(), }) - .wait(); + .await; assert_eq!(requests.lock().unwrap().len(), 1); assert!(result.is_ok()); } - #[test] - fn rejects_expired_incoming_packet() { + #[tokio::test] + async fn rejects_expired_incoming_packet() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::incoming( @@ -307,7 +307,7 @@ mod incoming { } .build(), }) - .wait(); + .await; assert!(requests.lock().unwrap().is_empty()); assert!(result.is_err()); @@ -328,30 +328,8 @@ mod outgoing { time::{Duration, SystemTime}, }; - #[derive(Clone)] - struct TestStore; - - impl AddressStore for TestStore { - /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { - unimplemented!() - } - - fn clear_ilp_address(&self) -> Box + 
Send> { - unimplemented!() - } - - /// Get's the store's ilp address from memory - fn get_ilp_address(&self) -> Address { - Address::from_str("example.connector").unwrap() - } - } - - #[test] - fn lets_through_valid_outgoing_response() { + #[tokio::test] + async fn lets_through_valid_outgoing_response() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::outgoing( @@ -382,14 +360,14 @@ mod outgoing { } .build(), }) - .wait(); + .await; assert_eq!(requests.lock().unwrap().len(), 1); assert!(result.is_ok()); } - #[test] - fn returns_reject_instead_of_invalid_fulfillment() { + #[tokio::test] + async fn returns_reject_instead_of_invalid_fulfillment() { let requests = Arc::new(Mutex::new(Vec::new())); let requests_clone = requests.clone(); let mut validator = ValidatorService::outgoing( @@ -420,7 +398,7 @@ mod outgoing { } .build(), }) - .wait(); + .await; assert_eq!(requests.lock().unwrap().len(), 1); assert!(result.is_err()); diff --git a/crates/interledger-service/src/lib.rs b/crates/interledger-service/src/lib.rs index 61508f1fa..11ea447b6 100644 --- a/crates/interledger-service/src/lib.rs +++ b/crates/interledger-service/src/lib.rs @@ -140,8 +140,8 @@ pub trait IncomingService { /// and/or handle the result of calling the inner service. fn wrap(self, f: F) -> WrappedService where - F: Fn(IncomingRequest, Self) -> R, - R: Future + Send + 'static, + F: Send + Sync + Fn(IncomingRequest, Box + Send>) -> R, + R: Future, Self: Clone + Sized, { WrappedService::wrap_incoming(self, f) @@ -162,8 +162,8 @@ pub trait OutgoingService { /// and/or handle the result of calling the inner service. fn wrap(self, f: F) -> WrappedService where - F: Fn(OutgoingRequest, Self) -> R, - R: Future + Send + 'static, + F: Send + Sync + Fn(OutgoingRequest, Box + Send>) -> R, + R: Future, Self: Clone + Sized, { WrappedService::wrap_outgoing(self, f) @@ -259,10 +259,10 @@ pub struct WrappedService { impl WrappedService where - F: Fn(IncomingRequest, IO) -> R, + F: Send + Sync + Fn(IncomingRequest, Box + Send>) -> R, + R: Future, IO: IncomingService + Clone, A: Account, - R: Future + Send + 'static, { /// Wrap the given service such that the provided function will /// be called to handle each request. That function can @@ -280,22 +280,22 @@ where #[async_trait] impl IncomingService for WrappedService where - F: Fn(IncomingRequest, IO) -> R + Send + Sync, - IO: IncomingService + Send + Sync + Clone, - A: Account, + F: Send + Sync + Fn(IncomingRequest, Box + Send>) -> R, R: Future + Send + 'static, + IO: IncomingService + Send + Sync + Clone + 'static, + A: Account + Sync, { async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { - (self.f)(request, (*self.inner).clone()).await + (self.f)(request, Box::new((*self.inner).clone())).await } } impl WrappedService where - F: Fn(OutgoingRequest, IO) -> R, + F: Send + Sync + Fn(OutgoingRequest, Box + Send>) -> R, + R: Future, IO: OutgoingService + Clone, A: Account, - R: Future + Send + 'static, { /// Wrap the given service such that the provided function will /// be called to handle each request. 
That function can @@ -313,13 +313,13 @@ where #[async_trait] impl OutgoingService for WrappedService where - F: Fn(OutgoingRequest, IO) -> R + Send + Sync, - IO: OutgoingService + Clone + Send + Sync, - A: Account, + F: Send + Sync + Fn(OutgoingRequest, Box + Send>) -> R, R: Future + Send + 'static, + IO: OutgoingService + Send + Sync + Clone + 'static, + A: Account, { async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { - (self.f)(request, (*self.inner).clone()).await + (self.f)(request, Box::new((*self.inner).clone())).await } } @@ -345,3 +345,209 @@ pub trait AddressStore: Clone { /// (the value is stored in memory because it is read often by all services) fn get_ilp_address(&self) -> Address; } + +// Even though we wrap the types _a lot_ of times in multiple configurations +// the tests still build nearly instantly. The trick is to make the wrapping function +// take a trait object instead of the concrete type +#[cfg(test)] +mod tests { + use super::*; + use lazy_static::lazy_static; + use std::str::FromStr; + + #[test] + fn incoming_service_no_exponential_blowup_when_wrapping() { + // a normal async function + async fn foo( + request: IncomingRequest, + mut next: Box + Send>, + ) -> IlpResult { + next.handle_request(request).await + } + + // and with a closure (async closure are unstable) + let foo2 = move |request, mut next: Box + Send>| { + async move { next.handle_request(request).await } + }; + + // base layer + let s = BaseService; + // our first layer + let s: LayeredService<_, TestAccount> = LayeredService::new_incoming(s); + + // wrapped in our closure + let s = WrappedService::wrap_incoming(s, foo2); + let s = WrappedService::wrap_incoming(s, foo2); + let s = WrappedService::wrap_incoming(s, foo2); + let s = WrappedService::wrap_incoming(s, foo2); + let s = WrappedService::wrap_incoming(s, foo2); + + // wrap it again in the normal service + let s = LayeredService::new_incoming(s); + + // some short syntax + let s = s.wrap(foo2); + let s = s.wrap(foo); + + // called with the full syntax + let s = WrappedService::wrap_incoming(s, foo); + let s = WrappedService::wrap_incoming(s, foo); + let s = WrappedService::wrap_incoming(s, foo); + let s = WrappedService::wrap_incoming(s, foo); + let s = WrappedService::wrap_incoming(s, foo); + + // more short syntax + let s = s.wrap(foo2); + let s = s.wrap(foo2); + let _s = s.wrap(foo2); + } + + #[test] + fn outgoing_service_no_exponential_blowup_when_wrapping() { + // a normal async function + async fn foo( + request: OutgoingRequest, + mut next: Box + Send>, + ) -> IlpResult { + next.send_request(request).await + } + + // and with a closure (async closure are unstable) + let foo2 = move |request, mut next: Box + Send>| { + async move { next.send_request(request).await } + }; + + // base layer + let s = BaseService; + // our first layer + let s: LayeredService<_, TestAccount> = LayeredService::new_outgoing(s); + + // wrapped in our closure + let s = WrappedService::wrap_outgoing(s, foo2); + let s = WrappedService::wrap_outgoing(s, foo2); + let s = WrappedService::wrap_outgoing(s, foo2); + let s = WrappedService::wrap_outgoing(s, foo2); + let s = WrappedService::wrap_outgoing(s, foo2); + + // wrap it again in the normal service + let s = LayeredService::new_outgoing(s); + + // some short syntax + let s = s.wrap(foo2); + let s = s.wrap(foo); + + // called with the full syntax + let s = WrappedService::wrap_outgoing(s, foo); + let s = WrappedService::wrap_outgoing(s, foo); + let s = WrappedService::wrap_outgoing(s, foo); + 
let s = WrappedService::wrap_outgoing(s, foo); + let s = WrappedService::wrap_outgoing(s, foo); + + // more short syntax + let s = s.wrap(foo2); + let s = s.wrap(foo2); + let _s = s.wrap(foo2); + } + + #[derive(Clone)] + struct BaseService; + + #[derive(Clone)] + struct LayeredService { + next: I, + account_type: PhantomData, + } + + impl LayeredService + where + I: IncomingService + Send + Sync + 'static, + A: Account, + { + fn new_incoming(next: I) -> Self { + Self { + next, + account_type: PhantomData, + } + } + } + + impl LayeredService + where + I: OutgoingService + Send + Sync + 'static, + A: Account, + { + fn new_outgoing(next: I) -> Self { + Self { + next, + account_type: PhantomData, + } + } + } + + #[async_trait] + impl OutgoingService for BaseService { + async fn send_request(&mut self, _request: OutgoingRequest) -> IlpResult { + unimplemented!() + } + } + + #[async_trait] + impl IncomingService for BaseService { + async fn handle_request(&mut self, _request: IncomingRequest) -> IlpResult { + unimplemented!() + } + } + + #[async_trait] + impl OutgoingService for LayeredService + where + I: OutgoingService + Send + Sync + 'static, + A: Account + Send + Sync + 'static, + { + async fn send_request(&mut self, _request: OutgoingRequest) -> IlpResult { + unimplemented!() + } + } + + #[async_trait] + impl IncomingService for LayeredService + where + I: IncomingService + Send + Sync + 'static, + A: Account + Send + Sync + 'static, + { + async fn handle_request(&mut self, _request: IncomingRequest) -> IlpResult { + unimplemented!() + } + } + + // Account test helpers + #[derive(Clone, Debug)] + pub struct TestAccount; + + lazy_static! { + pub static ref ALICE: Username = Username::from_str("alice").unwrap(); + pub static ref EXAMPLE_ADDRESS: Address = Address::from_str("example.alice").unwrap(); + } + + impl Account for TestAccount { + fn id(&self) -> Uuid { + unimplemented!() + } + + fn username(&self) -> &Username { + &ALICE + } + + fn asset_scale(&self) -> u8 { + 9 + } + + fn asset_code(&self) -> &str { + "XYZ" + } + + fn ilp_address(&self) -> &Address { + &EXAMPLE_ADDRESS + } + } +} diff --git a/crates/interledger-settlement/Cargo.toml b/crates/interledger-settlement/Cargo.toml index 0060f6f57..e645c94fd 100644 --- a/crates/interledger-settlement/Cargo.toml +++ b/crates/interledger-settlement/Cargo.toml @@ -8,14 +8,14 @@ edition = "2018" repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] -bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } -hyper = { version = "0.12.35", default-features = false } +bytes = { version = "0.5", default-features = false } +futures = { version = "0.3.1", default-features = false, features = ["compat"] } +hyper = { version = "0.13.1", default-features = false } interledger-http = { path = "../interledger-http", version = "^0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10", default-features = false, features = ["default-tls", "json"] } serde = { version = "1.0.101", default-features = false } serde_json = { version = "1.0.41", default-features = false } url = { version = "2.1.0", default-features = false } @@ 
-23,12 +23,13 @@ lazy_static = { version = "1.4.0", default-features = false } uuid = { version = "0.8.1", default-features = false, features = ["v4"] } ring = { version = "0.16.9", default-features = false } tokio-retry = { version = "0.2.0", default-features = false } -tokio = { version = "0.1.22", default-features = false } +tokio = { version = "0.2.6", default-features = false, features = ["macros", "rt-core"] } num-bigint = { version = "0.2.3", default-features = false, features = ["std"] } num-traits = { version = "0.2.8", default-features = false } -warp = { version = "0.1.20", default-features = false } -http = "0.1.19" -redis_crate = { package = "redis", version = "0.13.0", optional = true } +warp = { version = "0.2", default-features = false } +http = "0.2.0" +redis_crate = { package = "redis", version = "0.15.1", optional = true, features = ["tokio-rt-core"] } +async-trait = "0.1.22" [dev-dependencies] parking_lot = { version = "0.9.0", default-features = false } diff --git a/crates/interledger-settlement/src/api/client.rs b/crates/interledger-settlement/src/api/client.rs index 9b8e4cb43..f8a07feb8 100644 --- a/crates/interledger-settlement/src/api/client.rs +++ b/crates/interledger-settlement/src/api/client.rs @@ -1,31 +1,37 @@ use crate::core::types::{Quantity, SettlementAccount}; -use futures::{ - future::{err, Either}, - Future, -}; +use futures::TryFutureExt; use interledger_service::Account; use log::{debug, error, trace}; -use reqwest::r#async::Client; +use reqwest::Client; use serde_json::json; use uuid::Uuid; +/// Helper struct to execute settlements #[derive(Clone)] pub struct SettlementClient { + /// Asynchronous reqwest client http_client: Client, } impl SettlementClient { + /// Simple constructor pub fn new() -> Self { SettlementClient { http_client: Client::new(), } } - pub fn send_settlement( + /// Sends a settlement request to the node's settlement engine for the provided account and amount + /// + /// # Errors + /// 1. Account has no engine configured + /// 1. HTTP request to engine failed from node side + /// 1. HTTP response from engine was an error + pub async fn send_settlement( &self, account: A, amount: u64, - ) -> impl Future { + ) -> Result<(), ()> { if let Some(settlement_engine) = account.settlement_engine_details() { let mut settlement_engine_url = settlement_engine.url; settlement_engine_url @@ -40,23 +46,37 @@ impl SettlementClient { ); let settlement_engine_url_clone = settlement_engine_url.clone(); let idempotency_uuid = Uuid::new_v4().to_hyphenated().to_string(); - return Either::A(self.http_client.post(settlement_engine_url.as_ref()) + let response = self + .http_client + .post(settlement_engine_url.as_ref()) .header("Idempotency-Key", idempotency_uuid) .json(&json!(Quantity::new(amount, account.asset_scale()))) .send() - .map_err(move |err| error!("Error sending settlement command to settlement engine {}: {:?}", settlement_engine_url, err)) - .and_then(move |response| { - if response.status().is_success() { - trace!("Sent settlement of {} to settlement engine: {}", amount, settlement_engine_url_clone); - Ok(()) - } else { - error!("Error sending settlement. 
Settlement engine responded with HTTP code: {}", response.status()); - Err(()) - } - })); + .map_err(move |err| { + error!( + "Error sending settlement command to settlement engine {}: {:?}", + settlement_engine_url, err + ) + }) + .await?; + + if response.status().is_success() { + trace!( + "Sent settlement of {} to settlement engine: {}", + amount, + settlement_engine_url_clone + ); + return Ok(()); + } else { + error!( + "Error sending settlement. Settlement engine responded with HTTP code: {}", + response.status() + ); + return Err(()); + } } error!("Cannot send settlement for account {} because it does not have the settlement_engine_url and scale configured", account.id()); - Either::B(err(())) + Err(()) } } @@ -70,37 +90,37 @@ impl Default for SettlementClient { mod tests { use super::*; use crate::api::fixtures::TEST_ACCOUNT_0; - use crate::api::test_helpers::{block_on, mock_settlement}; + use crate::api::test_helpers::mock_settlement; use mockito::Matcher; - #[test] - fn settlement_ok() { + #[tokio::test] + async fn settlement_ok() { let m = mock_settlement(200) .match_header("Idempotency-Key", Matcher::Any) .create(); let client = SettlementClient::new(); - let ret = block_on(client.send_settlement(TEST_ACCOUNT_0.clone(), 100)); + let ret = client.send_settlement(TEST_ACCOUNT_0.clone(), 100).await; m.assert(); assert!(ret.is_ok()); } - #[test] - fn engine_rejects() { + #[tokio::test] + async fn engine_rejects() { let m = mock_settlement(500) .match_header("Idempotency-Key", Matcher::Any) .create(); let client = SettlementClient::new(); - let ret = block_on(client.send_settlement(TEST_ACCOUNT_0.clone(), 100)); + let ret = client.send_settlement(TEST_ACCOUNT_0.clone(), 100).await; m.assert(); assert!(ret.is_err()); } - #[test] - fn account_does_not_have_settlement_engine() { + #[tokio::test] + async fn account_does_not_have_settlement_engine() { let m = mock_settlement(200) .expect(0) .match_header("Idempotency-Key", Matcher::Any) @@ -109,7 +129,7 @@ mod tests { let mut acc = TEST_ACCOUNT_0.clone(); acc.no_details = true; // Hide the settlement engine data from the account - let ret = block_on(client.send_settlement(acc, 100)); + let ret = client.send_settlement(acc, 100).await; m.assert(); assert!(ret.is_err()); diff --git a/crates/interledger-settlement/src/api/message_service.rs b/crates/interledger-settlement/src/api/message_service.rs index f91adcc4c..5ec27aab6 100644 --- a/crates/interledger-settlement/src/api/message_service.rs +++ b/crates/interledger-settlement/src/api/message_service.rs @@ -1,20 +1,24 @@ use crate::core::types::{SettlementAccount, SE_ILP_ADDRESS}; -use futures::{ - future::{err, Either}, - Future, Stream, -}; +use async_trait::async_trait; +use futures::{compat::Future01CompatExt, TryFutureExt}; use interledger_packet::{ErrorCode, FulfillBuilder, RejectBuilder}; -use interledger_service::{Account, BoxedIlpFuture, IncomingRequest, IncomingService}; +use interledger_service::{Account, IlpResult, IncomingRequest, IncomingService}; use log::error; -use reqwest::r#async::Client; +use reqwest::Client; use std::marker::PhantomData; use tokio_retry::{strategy::ExponentialBackoff, Retry}; const PEER_FULFILLMENT: [u8; 32] = [0; 32]; +/// Service which implements [`IncomingService`](../../interledger_service/trait.IncomingService.html). 
+/// Responsible for catching incoming requests which are sent to `peer.settle` and forward them to +/// the node's settlement engine via HTTP #[derive(Clone)] pub struct SettlementMessageService { + /// The next incoming service which requests that don't get caught get sent to next: I, + /// HTTP client used to notify the engine corresponding to the account about + /// an incoming message from a peer's engine http_client: Client, account_type: PhantomData, } @@ -33,14 +37,13 @@ where } } +#[async_trait] impl IncomingService for SettlementMessageService where I: IncomingService + Send, - A: SettlementAccount + Account, + A: SettlementAccount + Account + Send + Sync, { - type Future = BoxedIlpFuture; - - fn handle_request(&mut self, request: IncomingRequest) -> Self::Future { + async fn handle_request(&mut self, request: IncomingRequest) -> IlpResult { // Only handle the request if the destination address matches the ILP address // of the settlement engine being used for this account if let Some(settlement_engine_details) = request.from.settlement_engine_details() { @@ -67,54 +70,75 @@ where .header("Idempotency-Key", idempotency_uuid.clone()) .body(message.clone()) .send() + .compat() // Wrap to a 0.1 future }; - return Box::new(Retry::spawn(ExponentialBackoff::from_millis(10).take(10), action) - .map_err(move |error| { - error!("Error sending message to settlement engine: {:?}", error); - RejectBuilder { - code: ErrorCode::T00_INTERNAL_ERROR, - message: b"Error sending message to settlement engine", - data: &[], - triggered_by: Some(&SE_ILP_ADDRESS), - }.build() - }) - .and_then(move |response| { - let status = response.status(); - if status.is_success() { - Either::A(response.into_body().concat2().map_err(move |err| { - error!("Error concatenating settlement engine response body: {:?}", err); + // TODO: tokio-retry is still not on futures 0.3. As a result, we wrap our action in a + // 0.1 future, and then wrap the Retry future in a 0.3 future to use async/await. 
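
The compat bridge referenced in the TODO above is provided by the `compat` feature enabled on futures 0.3 in this crate's Cargo.toml. A minimal, self-contained sketch of the same round trip, assuming the futures 0.1 crate is also available under the name `futures01`:

    use futures::compat::Future01CompatExt; // futures 0.1 -> std::future, adds `.compat()`
    use futures::future::TryFutureExt; // futures 0.3 TryFuture -> futures 0.1, also `.compat()`

    async fn bridge_example() {
        // Await a futures 0.1 future (the kind returned by tokio-retry's `Retry::spawn`):
        let answer = futures01::future::ok::<u32, ()>(42).compat().await;
        assert_eq!(answer, Ok(42));

        // Go the other way: hand a futures 0.3 future to 0.1-based code such as tokio-retry:
        let _fut01 = futures::future::ready(Ok::<u32, ()>(42)).compat();
    }
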
+ let response = Retry::spawn(ExponentialBackoff::from_millis(10).take(10), action) + .compat() + .map_err(move |error| { + error!("Error sending message to settlement engine: {:?}", error); + RejectBuilder { + code: ErrorCode::T00_INTERNAL_ERROR, + message: b"Error sending message to settlement engine", + data: &[], + triggered_by: Some(&SE_ILP_ADDRESS), + } + .build() + }) + .await?; + let status = response.status(); + if status.is_success() { + let body = response + .bytes() + .map_err(|err| { + error!( + "Error concatenating settlement engine response body: {:?}", + err + ); RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: b"Error getting settlement engine response", data: &[], triggered_by: Some(&SE_ILP_ADDRESS), - }.build() + } + .build() }) - .and_then(|body| { - Ok(FulfillBuilder { - fulfillment: &PEER_FULFILLMENT, - data: body.as_ref(), - }.build()) - })) + .await?; + + return Ok(FulfillBuilder { + fulfillment: &PEER_FULFILLMENT, + data: body.as_ref(), + } + .build()); + } else { + error!( + "Settlement engine rejected message with HTTP error code: {}", + response.status() + ); + let code = if status.is_client_error() { + ErrorCode::F00_BAD_REQUEST } else { - error!("Settlement engine rejected message with HTTP error code: {}", response.status()); - let code = if status.is_client_error() { - ErrorCode::F00_BAD_REQUEST - } else { - ErrorCode::T00_INTERNAL_ERROR - }; - Either::B(err(RejectBuilder { - code, - message: format!("Settlement engine rejected request with error code: {}", response.status()).as_str().as_ref(), - data: &[], - triggered_by: Some(&SE_ILP_ADDRESS), - }.build())) + ErrorCode::T00_INTERNAL_ERROR + }; + + return Err(RejectBuilder { + code, + message: format!( + "Settlement engine rejected request with error code: {}", + response.status() + ) + .as_str() + .as_ref(), + data: &[], + triggered_by: Some(&SE_ILP_ADDRESS), } - })); + .build()); + } } } - Box::new(self.next.handle_request(request)) + self.next.handle_request(request).await } } @@ -122,18 +146,18 @@ where mod tests { use super::*; use crate::api::fixtures::{BODY, DATA, SERVICE_ADDRESS, TEST_ACCOUNT_0}; - use crate::api::test_helpers::{block_on, mock_message, test_service}; + use crate::api::test_helpers::{mock_message, test_service}; use interledger_packet::{Address, Fulfill, PrepareBuilder, Reject}; use std::str::FromStr; use std::time::SystemTime; - #[test] - fn settlement_ok() { + #[tokio::test] + async fn settlement_ok() { // happy case let m = mock_message(200).create(); let mut settlement = test_service(); - let fulfill: Fulfill = block_on( - settlement.handle_request(IncomingRequest { + let fulfill: Fulfill = settlement + .handle_request(IncomingRequest { from: TEST_ACCOUNT_0.clone(), prepare: PrepareBuilder { amount: 0, @@ -143,22 +167,22 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap(); + }) + .await + .unwrap(); m.assert(); assert_eq!(fulfill.data(), BODY.as_bytes()); assert_eq!(fulfill.fulfillment(), &[0; 32]); } - #[test] - fn gets_forwarded_if_destination_not_engine_() { + #[tokio::test] + async fn gets_forwarded_if_destination_not_engine_() { let m = mock_message(200).create().expect(0); let mut settlement = test_service(); let destination = Address::from_str("example.some.address").unwrap(); - let reject: Reject = block_on( - settlement.handle_request(IncomingRequest { + let reject: Reject = settlement + .handle_request(IncomingRequest { from: TEST_ACCOUNT_0.clone(), prepare: PrepareBuilder { amount: 0, @@ -168,9 +192,9 @@ mod tests { 
execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap_err(); + }) + .await + .unwrap_err(); m.assert(); assert_eq!(reject.code(), ErrorCode::F02_UNREACHABLE); @@ -178,14 +202,14 @@ mod tests { assert_eq!(reject.message(), b"No other incoming handler!" as &[u8],); } - #[test] - fn account_does_not_have_settlement_engine() { + #[tokio::test] + async fn account_does_not_have_settlement_engine() { let m = mock_message(200).create().expect(0); let mut settlement = test_service(); let mut acc = TEST_ACCOUNT_0.clone(); acc.no_details = true; // Hide the settlement engine data from the account - let reject: Reject = block_on( - settlement.handle_request(IncomingRequest { + let reject: Reject = settlement + .handle_request(IncomingRequest { from: acc.clone(), prepare: PrepareBuilder { amount: 0, @@ -195,9 +219,9 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap_err(); + }) + .await + .unwrap_err(); m.assert(); assert_eq!(reject.code(), ErrorCode::F02_UNREACHABLE); @@ -205,15 +229,15 @@ mod tests { assert_eq!(reject.message(), b"No other incoming handler!"); } - #[test] - fn settlement_engine_rejects() { + #[tokio::test] + async fn settlement_engine_rejects() { // for whatever reason the engine rejects our request with a 500 code let error_code = 500; let error_str = "Internal Server Error"; let m = mock_message(error_code).create(); let mut settlement = test_service(); - let reject: Reject = block_on( - settlement.handle_request(IncomingRequest { + let reject: Reject = settlement + .handle_request(IncomingRequest { from: TEST_ACCOUNT_0.clone(), prepare: PrepareBuilder { amount: 0, @@ -223,9 +247,9 @@ mod tests { execution_condition: &[0; 32], } .build(), - }), - ) - .unwrap_err(); + }) + .await + .unwrap_err(); m.assert(); assert_eq!(reject.code(), ErrorCode::T00_INTERNAL_ERROR); diff --git a/crates/interledger-settlement/src/api/mod.rs b/crates/interledger-settlement/src/api/mod.rs index 621b1769d..9f7510082 100644 --- a/crates/interledger-settlement/src/api/mod.rs +++ b/crates/interledger-settlement/src/api/mod.rs @@ -1,5 +1,9 @@ +/// Settlement-related client methods mod client; +/// [`IncomingService`](../../interledger_service/trait.IncomingService.html) which catches +/// incoming requests which are sent to `peer.settle` (the node's settlement engine ILP address) mod message_service; +/// The Warp API exposed by the connector mod node_api; #[cfg(test)] @@ -7,7 +11,6 @@ mod fixtures; #[cfg(test)] mod test_helpers; -// Expose the API creation filter method and the necessary services pub use client::SettlementClient; pub use message_service::SettlementMessageService; pub use node_api::create_settlements_filter; diff --git a/crates/interledger-settlement/src/api/node_api.rs b/crates/interledger-settlement/src/api/node_api.rs index d095eed11..e88aa348d 100644 --- a/crates/interledger-settlement/src/api/node_api.rs +++ b/crates/interledger-settlement/src/api/node_api.rs @@ -3,16 +3,12 @@ use crate::core::{ idempotency::*, scale_with_precision_loss, types::{ - ApiResponse, LeftoversStore, Quantity, SettlementAccount, SettlementStore, - CONVERSION_ERROR_TYPE, NO_ENGINE_CONFIGURED_ERROR_TYPE, SE_ILP_ADDRESS, + ApiResponse, ApiResult, LeftoversStore, Quantity, SettlementAccount, SettlementStore, + CONVERSION_ERROR_TYPE, SE_ILP_ADDRESS, }, }; -use bytes::buf::FromBuf; use bytes::Bytes; -use futures::{ - future::{err, result}, - Future, -}; +use futures::TryFutureExt; use hyper::{Response, StatusCode}; use interledger_http::error::*; use 
interledger_packet::PrepareBuilder; @@ -27,11 +23,96 @@ use std::{ use uuid::Uuid; use warp::{self, reject::Rejection, Filter}; +/// Prepare packet's execution condition as defined in the [RFC](https://interledger.org/rfcs/0038-settlement-engines/#exchanging-messages) static PEER_PROTOCOL_CONDITION: [u8; 32] = [ 102, 104, 122, 173, 248, 98, 189, 119, 108, 143, 193, 139, 142, 159, 142, 32, 8, 151, 20, 133, 110, 226, 51, 179, 144, 42, 89, 29, 13, 95, 41, 37, ]; +/// Makes an idempotent call to [`do_receive_settlement`](./fn.do_receive_settlement.html) +/// Returns Status Code 201 +async fn receive_settlement( + account_id: String, + idempotency_key: Option, + quantity: Quantity, + store: S, +) -> Result +where + S: LeftoversStore + + SettlementStore + + IdempotentStore + + AccountStore + + Clone + + Send + + Sync + + 'static, + A: SettlementAccount + Account + Send + Sync + 'static, +{ + let input = format!("{}{:?}", account_id, quantity); + let input_hash = get_hash_of(input.as_ref()); + + let idempotency_key_clone = idempotency_key.clone(); + let store_clone = store.clone(); + let (status_code, message) = make_idempotent_call( + store, + do_receive_settlement(store_clone, account_id, quantity, idempotency_key_clone), + input_hash, + idempotency_key, + StatusCode::CREATED, + "RECEIVED".into(), + ) + .await?; + Ok(Response::builder() + .status(status_code) + .body(message) + .unwrap()) +} + +/// Makes an idempotent call to [`do_send_outgoing_message`](./fn.do_send_outgoing_message.html) +/// Returns Status Code 201 +async fn send_message( + account_id: String, + idempotency_key: Option, + message: Bytes, + store: S, + outgoing_handler: O, +) -> Result +where + S: LeftoversStore + + SettlementStore + + IdempotentStore + + AccountStore + + Clone + + Send + + Sync + + 'static, + O: OutgoingService + Clone + Send + Sync + 'static, + A: SettlementAccount + Account + Send + Sync + 'static, +{ + let input = format!("{}{:?}", account_id, message); + let input_hash = get_hash_of(input.as_ref()); + + let store_clone = store.clone(); + let (status_code, message) = make_idempotent_call( + store, + do_send_outgoing_message(store_clone, outgoing_handler, account_id, message.to_vec()), + input_hash, + idempotency_key, + StatusCode::CREATED, + "SENT".into(), + ) + .await?; + Ok(Response::builder() + .status(status_code) + .body(message) + .unwrap()) +} + +/// Returns a Node Settlement filter which exposes a Warp-compatible +/// idempotent API which +/// 1. receives messages about incoming settlements from the engine +/// 1. 
sends messages from the connector's engine to the peer's +/// message service which are sent to the peer's engine pub fn create_settlements_filter( store: S, outgoing_handler: O, @@ -50,94 +131,31 @@ where { let with_store = warp::any().map(move || store.clone()).boxed(); let idempotency = warp::header::optional::("idempotency-key"); - let account_id_filter = warp::path("accounts").and(warp::path::param2::()); // account_id + let account_id_filter = warp::path("accounts").and(warp::path::param::()); // account_id // POST /accounts/:account_id/settlements (optional idempotency-key header) // Body is a Quantity object let settlement_endpoint = account_id_filter.and(warp::path("settlements")); - let settlements = warp::post2() + let settlements = warp::post() .and(settlement_endpoint) .and(warp::path::end()) .and(idempotency) .and(warp::body::json()) .and(with_store.clone()) - .and_then( - move |account_id: String, - idempotency_key: Option, - quantity: Quantity, - store: S| { - let input = format!("{}{:?}", account_id, quantity); - let input_hash = get_hash_of(input.as_ref()); - - let idempotency_key_clone = idempotency_key.clone(); - let store_clone = store.clone(); - let receive_settlement_fn = move || { - do_receive_settlement(store_clone, account_id, quantity, idempotency_key_clone) - }; - make_idempotent_call( - store, - receive_settlement_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - "RECEIVED".into(), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and_then(receive_settlement); // POST /accounts/:account_id/messages (optional idempotency-key header) // Body is a Vec object let with_outgoing_handler = warp::any().map(move || outgoing_handler.clone()).boxed(); let messages_endpoint = account_id_filter.and(warp::path("messages")); - let messages = warp::post2() + let messages = warp::post() .and(messages_endpoint) .and(warp::path::end()) .and(idempotency) - .and(warp::body::concat()) - .and(with_store.clone()) - .and(with_outgoing_handler.clone()) - .and_then( - move |account_id: String, - idempotency_key: Option, - body: warp::body::FullBody, - store: S, - outgoing_handler: O| { - // Gets called by our settlement engine, forwards the request outwards - // until it reaches the peer's settlement engine. - let message = Vec::from_buf(body); - let input = format!("{}{:?}", account_id, message); - let input_hash = get_hash_of(input.as_ref()); - - let store_clone = store.clone(); - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let send_outgoing_message_fn = move || { - do_send_outgoing_message(store_clone, outgoing_handler, account_id, message) - }; - make_idempotent_call( - store, - send_outgoing_message_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - "SENT".into(), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and(warp::body::bytes()) + .and(with_store) + .and(with_outgoing_handler) + .and_then(send_message); settlements .or(messages) @@ -145,12 +163,16 @@ where .boxed() } -fn do_receive_settlement( +/// Receives a settlement message from the connector's engine, proceeds to scale it to the +/// asset scale which corresponds to the account, and finally increases the account's balance +/// by the processed amount. 
This implements the main functionality by which an account's credit +/// is repaid, allowing them to send out more payments +async fn do_receive_settlement( store: S, account_id: String, body: Quantity, idempotency_key: Option, -) -> Box + Send> +) -> ApiResult where S: LeftoversStore + SettlementStore @@ -167,103 +189,111 @@ where let engine_scale = body.scale; // Convert to the desired data types - let account_id = match Uuid::from_str(&account_id) { - Ok(a) => a, - Err(_) => { - let error_msg = format!("Unable to parse account id: {}", account_id); - error!("{}", error_msg); - return Box::new(err(ApiError::invalid_account_id(Some(&account_id)))); - } - }; + let account_id = Uuid::from_str(&account_id).map_err(move |_| { + let err = ApiError::invalid_account_id(Some(&account_id)); + error!("{}", err); + err + })?; + + let engine_amount = BigUint::from_str(&engine_amount).map_err(|_| { + let error_msg = format!("Could not convert amount: {:?}", engine_amount); + error!("{}", error_msg); + ApiError::from_api_error_type(&CONVERSION_ERROR_TYPE).detail(error_msg) + })?; + + let accounts = store + .get_accounts(vec![account_id]) + .map_err(move |_| { + let err = ApiError::account_not_found() + .detail(format!("Account {} was not found", account_id)); + error!("{}", err); + err + }) + .await?; + + let account = &accounts[0]; + if account.settlement_engine_details().is_none() { + let err = ApiError::account_not_found().detail(format!("Account {} has no settlement engine details configured, cannot send a settlement engine message to that account", accounts[0].id())); + error!("{}", err); + return Err(err); + } - let engine_amount = match BigUint::from_str(&engine_amount) { - Ok(a) => a, - Err(_) => { - let error_msg = format!("Could not convert amount: {:?}", engine_amount); + let account_id = account.id(); + let asset_scale = account.asset_scale(); + // Scale to account's scale from the engine's scale + // If we're downscaling we might have some precision error which + // we must save as leftovers. Upscaling is OK since we're using + // biguint's. + let (scaled_engine_amount, precision_loss) = + scale_with_precision_loss(engine_amount, asset_scale, engine_scale); + + // This will load any leftovers (which are saved in the highest + // so far received scale by the engine), will scale them to + // the account's asset scale and return them. 
If there was any + // precision loss due to downscaling, it will also update the + // leftovers to the new leftovers value + let scaled_leftover_amount = store_clone + .load_uncredited_settlement_amount(account_id, asset_scale) + .map_err(move |_err| { + let error_msg = format!( + "Error getting uncredited settlement amount for: {}", + account.id() + ); error!("{}", error_msg); - return Box::new(err( - ApiError::from_api_error_type(&CONVERSION_ERROR_TYPE).detail(error_msg) - )); - } - }; + let error_type = ApiErrorType { + r#type: &ProblemType::Default, + status: StatusCode::INTERNAL_SERVER_ERROR, + title: "Load uncredited settlement amount error", + }; + ApiError::from_api_error_type(&error_type).detail(error_msg) + }) + .await?; + + // add the leftovers to the scaled engine amount + let total_amount = scaled_engine_amount.clone() + scaled_leftover_amount; + let engine_amount_u64 = total_amount.to_u64().unwrap_or(std::u64::MAX); + + let ret = futures::future::join_all(vec![ + // update the account's balance in the store + store.update_balance_for_incoming_settlement( + account_id, + engine_amount_u64, + idempotency_key, + ), + // save any precision loss that occurred during the + // scaling of the engine's amount to the account's scale + store.save_uncredited_settlement_amount(account_id, (precision_loss, engine_scale)), + ]) + .await; + + // if any of the futures errored, then we should propagate that + if ret.iter().any(|r| r.is_err()) { + let error_msg = format!( + "Error updating the balance and leftovers of account: {}", + account_id + ); + error!("{}", error_msg); + let error_type = ApiErrorType { + r#type: &ProblemType::Default, + status: StatusCode::INTERNAL_SERVER_ERROR, + title: "Balance update error", + }; + return Err(ApiError::from_api_error_type(&error_type).detail(error_msg)); + } - Box::new( - store.get_accounts(vec![account_id]) - .map_err(move |_err| { - let err = ApiError::account_not_found().detail(format!("Account {} was not found", account_id)); - error!("{}", err); - err - }) - .and_then(move |accounts| { - let account = &accounts[0]; - if account.settlement_engine_details().is_some() { - Ok(account.clone()) - } else { - let error_msg = format!("Account {} does not have settlement engine details configured. Cannot handle incoming settlement", account.id()); - error!("{}", error_msg); - Err(ApiError::from_api_error_type(&NO_ENGINE_CONFIGURED_ERROR_TYPE).detail(error_msg)) - } - }) - .and_then(move |account| { - let account_id = account.id(); - let asset_scale = account.asset_scale(); - // Scale to account's scale from the engine's scale - // If we're downscaling we might have some precision error which - // we must save as leftovers. Upscaling is OK since we're using - // biguint's. - let (scaled_engine_amount, precision_loss) = scale_with_precision_loss(engine_amount, asset_scale, engine_scale); - - // This will load any leftovers (which are saved in the highest - // so far received scale by the engine), will scale them to - // the account's asset scale and return them. 
If there was any - // precision loss due to downscaling, it will also update the - // leftovers to the new leftovers value - store_clone.load_uncredited_settlement_amount(account_id, asset_scale) - .map_err(move |_err| { - let error_msg = format!("Error getting uncredited settlement amount for: {}", account.id()); - error!("{}", error_msg); - let error_type = ApiErrorType { - r#type: &ProblemType::Default, - status: StatusCode::INTERNAL_SERVER_ERROR, - title: "Load uncredited settlement amount error", - }; - ApiError::from_api_error_type(&error_type).detail(error_msg) - }) - .and_then(move |scaled_leftover_amount| { - // add the leftovers to the scaled engine amount - let total_amount = scaled_engine_amount.clone() + scaled_leftover_amount; - let engine_amount_u64 = total_amount.to_u64().unwrap_or(std::u64::MAX); - - futures::future::join_all(vec![ - // update the account's balance in the store - store.update_balance_for_incoming_settlement(account_id, engine_amount_u64, idempotency_key), - // save any precision loss that occurred during the - // scaling of the engine's amount to the account's scale - store.save_uncredited_settlement_amount(account_id, (precision_loss, engine_scale)) - ]) - .map_err(move |_| { - let error_msg = format!("Error updating the balance and leftovers of account: {}", account_id); - error!("{}", error_msg); - let error_type = ApiErrorType { - r#type: &ProblemType::Default, - status: StatusCode::INTERNAL_SERVER_ERROR, - title: "Balance update error" - }; - ApiError::from_api_error_type(&error_type).detail(error_msg) - }) - .and_then(move |_| { - Ok(ApiResponse::Default) - }) - }) - })) + Ok(ApiResponse::Default) } -fn do_send_outgoing_message( +/// Sends a messages via the provided `outgoing_handler` with the `peer.settle` +/// ILP Address as ultimate destination. 
This messages should get caught by the +/// peer's message service, get forwarded to their engine, and then the response +/// should be communicated back via a Fulfill or Reject packet +async fn do_send_outgoing_message( store: S, - mut outgoing_handler: O, + outgoing_handler: O, account_id: String, body: Vec, -) -> Box + Send> +) -> ApiResult where S: LeftoversStore + SettlementStore @@ -271,69 +301,74 @@ where + AccountStore + Clone + Send - + Sync - + 'static, - O: OutgoingService + Clone + Send + Sync + 'static, - A: SettlementAccount + Account + Send + Sync + 'static, + + Sync, + O: OutgoingService + Clone + Send + Sync, + A: SettlementAccount + Account + Send + Sync, { - Box::new(result(Uuid::from_str(&account_id) - .map_err(move |_| { - let err = ApiError::invalid_account_id(Some(&account_id)); - error!("{}", err); - err - })) - .and_then(move |account_id| { - store.get_accounts(vec![account_id]) - .map_err(move |_| { - let err = ApiError::account_not_found().detail(format!("Account {} was not found", account_id)); - error!("{}", err); - err - }) - }) - .and_then(|accounts| { - let account = &accounts[0]; - if account.settlement_engine_details().is_some() { - Ok(account.clone()) - } else { - let err = ApiError::account_not_found().detail(format!("Account {} has no settlement engine details configured, cannot send a settlement engine message to that account", accounts[0].id())); - error!("{}", err); - Err(err) + let account_id = Uuid::from_str(&account_id).map_err(move |_| { + let err = ApiError::invalid_account_id(Some(&account_id)); + error!("{}", err); + err + })?; + let accounts = store + .get_accounts(vec![account_id]) + .map_err(move |_| { + let err = ApiError::account_not_found() + .detail(format!("Account {} was not found", account_id)); + error!("{}", err); + err + }) + .await?; + + let account = &accounts[0]; + if account.settlement_engine_details().is_none() { + let err = ApiError::account_not_found().detail(format!("Account {} has no settlement engine details configured, cannot send a settlement engine message to that account", accounts[0].id())); + error!("{}", err); + return Err(err); + } + + // Send the message to the peer's settlement engine. + // Note that we use dummy values for the `from` and `original_amount` + // because this `OutgoingRequest` will bypass the router and thus will not + // use either of these values. Including dummy values in the rare case where + // we do not need them seems easier than using + // `Option`s all over the place. + let packet = { + let mut handler = outgoing_handler.clone(); + handler + .send_request(OutgoingRequest { + from: account.clone(), + to: account.clone(), + original_amount: 0, + prepare: PrepareBuilder { + destination: SE_ILP_ADDRESS.clone(), + amount: 0, + expires_at: SystemTime::now() + Duration::from_secs(30), + data: &body, + execution_condition: &PEER_PROTOCOL_CONDITION, } + .build(), }) - .and_then(move |account| { - // Send the message to the peer's settlement engine. - // Note that we use dummy values for the `from` and `original_amount` - // because this `OutgoingRequest` will bypass the router and thus will not - // use either of these values. Including dummy values in the rare case where - // we do not need them seems easier than using - // `Option`s all over the place. 
- outgoing_handler.send_request(OutgoingRequest { - from: account.clone(), - to: account.clone(), - original_amount: 0, - prepare: PrepareBuilder { - destination: SE_ILP_ADDRESS.clone(), - amount: 0, - expires_at: SystemTime::now() + Duration::from_secs(30), - data: &body, - execution_condition: &PEER_PROTOCOL_CONDITION, - }.build() - }) - .map_err(move |reject| { - let error_msg = format!("Error sending message to peer settlement engine. Packet rejected with code: {}, message: {}", reject.code(), str::from_utf8(reject.message()).unwrap_or_default()); - let error_type = ApiErrorType { - r#type: &ProblemType::Default, - status: StatusCode::BAD_GATEWAY, - title: "Error sending message to peer engine", - }; - error!("{}", error_msg); - ApiError::from_api_error_type(&error_type).detail(error_msg) - }) - }) - .and_then(move |fulfill| { - let data = Bytes::from(fulfill.data()); - Ok(ApiResponse::Data(data)) - })) + .await + }; + + match packet { + Ok(fulfill) => { + // TODO: Can we avoid copying here? + let data = Bytes::copy_from_slice(fulfill.data()); + Ok(ApiResponse::Data(data)) + } + Err(reject) => { + let error_msg = format!("Error sending message to peer settlement engine. Packet rejected with code: {}, message: {}", reject.code(), str::from_utf8(reject.message()).unwrap_or_default()); + let error_type = ApiErrorType { + r#type: &ProblemType::Default, + status: StatusCode::BAD_GATEWAY, + title: "Error sending message to peer engine", + }; + error!("{}", error_msg); + Err(ApiError::from_api_error_type(&error_type).detail(error_msg)) + } + } } #[cfg(test)] @@ -371,11 +406,12 @@ mod tests { // Settlement Tests mod settlement_tests { use super::*; + use bytes::Bytes; use serde_json::json; const OUR_SCALE: u8 = 11; - fn settlement_call( + async fn settlement_call( api: &F, id: &str, amount: u64, @@ -394,11 +430,11 @@ mod tests { if let Some(idempotency_key) = idempotency_key { response = response.header("Idempotency-Key", idempotency_key); } - response.reply(api) + response.reply(api).await } - #[test] - fn settlement_ok() { + #[tokio::test] + async fn settlement_ok() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); @@ -407,17 +443,17 @@ mod tests { // = 9. When // we send a settlement with scale OUR_SCALE, the connector should respond // with 2 less 0's. 
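The notification body that `settlement_call` posts is the `Quantity` JSON consumed by `do_receive_settlement` above; roughly (assuming `Quantity`'s serde field names match its `amount` and `scale` fields, with the amount carried as a decimal string so arbitrary-precision values survive JSON):

    // Sketch of the body for the first call below, optionally sent together
    // with an `Idempotency-Key` header:
    let body = serde_json::json!({ "amount": "200", "scale": 11 });
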
- let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(response.status(), StatusCode::CREATED); // check that it's idempotent - let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, &id, 200, OUR_SCALE, Some(IDEMPOTENCY)).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(response.status(), StatusCode::CREATED); // fails with different account id - let response = settlement_call(&api, "2", 200, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, "2", 200, OUR_SCALE, Some(IDEMPOTENCY)).await; check_error_status_and_message( response, 409, @@ -425,7 +461,7 @@ mod tests { ); // fails with different settlement data and account id - let response = settlement_call(&api, "2", 42, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, "2", 42, OUR_SCALE, Some(IDEMPOTENCY)).await; check_error_status_and_message( response, 409, @@ -433,7 +469,7 @@ mod tests { ); // fails with different settlement data and same account id - let response = settlement_call(&api, &id, 42, OUR_SCALE, Some(IDEMPOTENCY)); + let response = settlement_call(&api, &id, 42, OUR_SCALE, Some(IDEMPOTENCY)).await; check_error_status_and_message( response, 409, @@ -441,7 +477,7 @@ mod tests { ); // works without idempotency key - let response = settlement_call(&api, &id, 400, OUR_SCALE, None); + let response = settlement_call(&api, &id, 400, OUR_SCALE, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(response.status(), StatusCode::CREATED); @@ -452,19 +488,19 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 4); assert_eq!(cached_data.status, StatusCode::CREATED); - assert_eq!(cached_data.body, &Bytes::from("RECEIVED")); + assert_eq!(cached_data.body, &bytes::Bytes::from("RECEIVED")); } - #[test] + #[tokio::test] // The connector must save the difference each time there's precision // loss and try to add it the amount it's being notified to settle for the next time. 
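Spelled out, the sequence the test below drives (the account's scale is 9; "credit" is the balance increase, "leftover" is what remains uncredited):

    205 at scale 11 -> credit 2,   leftover 0.05
    855 at scale 12 -> credit 0,   leftover 0.905 in total
    110 at scale 11 -> credit 1,   leftover 1.005 in total
      5 at scale  9 -> credit 5 + 1 from leftovers = 6, leftover 0.005
      2 at scale  7 -> credit 200, leftovers unchanged at 0.005
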
- fn settlement_leftovers() { + async fn settlement_leftovers() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); // Send 205 with scale 11, 2 decimals lost -> 0.05 leftovers - let response = settlement_call(&api, &id, 205, 11, None); + let response = settlement_call(&api, &id, 205, 11, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); // balance should be 2 @@ -472,75 +508,75 @@ mod tests { assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(5u32), 11) ); // Send 855 with scale 12, 3 decimals lost -> 0.855 leftovers, - let response = settlement_call(&api, &id, 855, 12, None); + let response = settlement_call(&api, &id, 855, 12, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 2); // total leftover: 0.905 = 0.05 + 0.855 assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(905u32), 12) ); // send 110 with scale 11, 2 decimals lost -> 0.1 leftover - let response = settlement_call(&api, &id, 110, 11, None); + let response = settlement_call(&api, &id, 110, 11, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 3); assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(1005u32), 12) ); // send 5 with scale 9, will consume the leftovers and increase // total balance by 6 while updating the rest of the leftovers - let response = settlement_call(&api, &id, 5, 9, None); + let response = settlement_call(&api, &id, 5, 9, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 9); assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(5u32), 12) ); // we send a payment with a smaller scale than the account now - let response = settlement_call(&api, &id, 2, 7, None); + let response = settlement_call(&api, &id, 2, 7, None).await; assert_eq!(response.body(), &Bytes::from("RECEIVED")); assert_eq!(store.get_balance(TEST_ACCOUNT_0.id), 209); // leftovers are still the same assert_eq!( store .get_uncredited_settlement_amount(TEST_ACCOUNT_0.id) - .wait() + .await .unwrap(), (BigUint::from(5u32), 12) ); } - #[test] - fn account_has_no_engine_configured() { + #[tokio::test] + async fn account_has_no_engine_configured() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, false); let api = test_api(store.clone(), false); - let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); - check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 does not have settlement engine details configured. Cannot handle incoming settlement"); + let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; + check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 has no settlement engine details configured, cannot send a settlement engine message to that account"); // check that it's idempotent - let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); - check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 does not have settlement engine details configured. 
Cannot handle incoming settlement"); + let response = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; + check_error_status_and_message(response, 404, "Account 00000000-0000-0000-0000-000000000000 has no settlement engine details configured, cannot send a settlement engine message to that account"); let s = store.clone(); let cache = s.cache.read(); @@ -549,34 +585,34 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 1); assert_eq!(cached_data.status, 404); - assert_eq!(cached_data.body, &Bytes::from("Account 00000000-0000-0000-0000-000000000000 does not have settlement engine details configured. Cannot handle incoming settlement")); + assert_eq!(cached_data.body, &bytes::Bytes::from("Account 00000000-0000-0000-0000-000000000000 has no settlement engine details configured, cannot send a settlement engine message to that account")); } - #[test] - fn update_balance_for_incoming_settlement_fails() { + #[tokio::test] + async fn update_balance_for_incoming_settlement_fails() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(true, true); let api = test_api(store, false); - let response = settlement_call(&api, &id, 100, 18, None); + let response = settlement_call(&api, &id, 100, 18, None).await; assert_eq!(response.status().as_u16(), 500); } - #[test] - fn invalid_account_id() { + #[tokio::test] + async fn invalid_account_id() { // the api is configured to take an accountId type // supplying an id that cannot be parsed to that type must fail let id = "a".to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); // check that it's idempotent - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); - let _ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let _ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; let s = store.clone(); let cache = s.cache.read(); @@ -585,23 +621,26 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 2); assert_eq!(cached_data.status, 400); - assert_eq!(cached_data.body, &Bytes::from("a is an invalid account ID")); + assert_eq!( + cached_data.body, + &bytes::Bytes::from("a is an invalid account ID") + ); } - #[test] - fn account_not_in_store() { + #[tokio::test] + async fn account_not_in_store() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = TestStore::new(vec![], false); let api = test_api(store.clone(), false); - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, "Account 00000000-0000-0000-0000-000000000000 was not found", ); - let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)); + let ret = settlement_call(&api, &id, 100, 18, Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, @@ -616,15 +655,16 @@ mod tests { assert_eq!(cached_data.status, 404); assert_eq!( cached_data.body, - &Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") + &bytes::Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") ); } } mod message_tests { use super::*; + use 
bytes::Bytes; - fn messages_call( + async fn messages_call( api: &F, id: &str, message: &[u8], @@ -642,26 +682,26 @@ mod tests { if let Some(idempotency_key) = idempotency_key { response = response.header("Idempotency-Key", idempotency_key); } - response.reply(api) + response.reply(api).await } - #[test] - fn message_ok() { + #[tokio::test] + async fn message_ok() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), true); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), &Bytes::from("hello!")); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), &Bytes::from("hello!")); // Using the same idempotency key with different arguments MUST // fail. - let ret = messages_call(&api, "1", &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, "1", &[], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 409, @@ -670,7 +710,7 @@ mod tests { let data = [0, 1, 2]; // fails with different account id and data - let ret = messages_call(&api, "1", &data[..], Some(IDEMPOTENCY)); + let ret = messages_call(&api, "1", &data[..], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 409, @@ -678,7 +718,7 @@ mod tests { ); // fails for same account id but different data - let ret = messages_call(&api, &id, &data[..], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &data[..], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 409, @@ -691,19 +731,19 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 4); assert_eq!(cached_data.status, StatusCode::CREATED); - assert_eq!(cached_data.body, &Bytes::from("hello!")); + assert_eq!(cached_data.body, &bytes::Bytes::from("hello!")); } - #[test] - fn message_gets_rejected() { + #[tokio::test] + async fn message_gets_rejected() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = test_store(false, true); let api = test_api(store.clone(), false); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 502, "Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!"); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 502, "Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!"); let s = store.clone(); @@ -712,24 +752,24 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 1); assert_eq!(cached_data.status, 502); - assert_eq!(cached_data.body, &Bytes::from("Error sending message to peer settlement engine. Packet rejected with code: F02, message: No other outgoing handler!")); + assert_eq!(cached_data.body, &bytes::Bytes::from("Error sending message to peer settlement engine. 
Packet rejected with code: F02, message: No other outgoing handler!")); } - #[test] - fn invalid_account_id() { + #[tokio::test] + async fn invalid_account_id() { // the api is configured to take an accountId type // supplying an id that cannot be parsed to that type must fail let id = "a".to_string(); let store = test_store(false, true); let api = test_api(store.clone(), true); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message(ret, 400, "a is an invalid account ID"); let s = store.clone(); @@ -739,23 +779,26 @@ mod tests { let cache_hits = s.cache_hits.read(); assert_eq!(*cache_hits, 2); assert_eq!(cached_data.status, 400); - assert_eq!(cached_data.body, &Bytes::from("a is an invalid account ID")); + assert_eq!( + cached_data.body, + &bytes::Bytes::from("a is an invalid account ID") + ); } - #[test] - fn account_not_in_store() { + #[tokio::test] + async fn account_not_in_store() { let id = TEST_ACCOUNT_0.clone().id.to_string(); let store = TestStore::new(vec![], false); let api = test_api(store.clone(), true); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, "Account 00000000-0000-0000-0000-000000000000 was not found", ); - let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)); + let ret = messages_call(&api, &id, &[], Some(IDEMPOTENCY)).await; check_error_status_and_message( ret, 404, @@ -771,7 +814,7 @@ mod tests { assert_eq!(cached_data.status, 404); assert_eq!( cached_data.body, - &Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") + &bytes::Bytes::from("Account 00000000-0000-0000-0000-000000000000 was not found") ); } } diff --git a/crates/interledger-settlement/src/api/test_helpers.rs b/crates/interledger-settlement/src/api/test_helpers.rs index ac207ab03..c3abe8363 100644 --- a/crates/interledger-settlement/src/api/test_helpers.rs +++ b/crates/interledger-settlement/src/api/test_helpers.rs @@ -8,10 +8,6 @@ use crate::core::{ }, }; use bytes::Bytes; -use futures::{ - future::{err, ok}, - Future, -}; use hyper::StatusCode; use interledger_packet::{Address, ErrorCode, FulfillBuilder, RejectBuilder}; use interledger_service::{ @@ -22,12 +18,13 @@ use num_bigint::BigUint; use uuid::Uuid; use super::fixtures::{BODY, MESSAGES_API, SERVICE_ADDRESS, SETTLEMENT_API, TEST_ACCOUNT_0}; +use async_trait::async_trait; use lazy_static::lazy_static; use parking_lot::RwLock; +use std::cmp::Ordering; use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; -use tokio::runtime::Runtime; use url::Url; #[derive(Debug, Clone)] @@ -86,73 +83,75 @@ pub struct TestStore { pub uncredited_settlement_amount: Arc>>, } +#[async_trait] impl SettlementStore for TestStore { type Account = TestAccount; - fn update_balance_for_incoming_settlement( + async fn update_balance_for_incoming_settlement( &self, account_id: Uuid, amount: u64, _idempotency_key: Option, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut accounts = self.accounts.write(); 
for mut a in &mut *accounts { if a.id() == account_id { a.balance += amount as i64; } } - let ret = if self.should_fail { err(()) } else { ok(()) }; - Box::new(ret) + if self.should_fail { + Err(()) + } else { + Ok(()) + } } - fn refund_settlement( - &self, - _account_id: Uuid, - _settle_amount: u64, - ) -> Box + Send> { - let ret = if self.should_fail { err(()) } else { ok(()) }; - Box::new(ret) + async fn refund_settlement(&self, _account_id: Uuid, _settle_amount: u64) -> Result<(), ()> { + if self.should_fail { + Err(()) + } else { + Ok(()) + } } } +#[async_trait] impl IdempotentStore for TestStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let cache = self.cache.read(); if let Some(data) = cache.get(&idempotency_key) { let mut guard = self.cache_hits.write(); *guard += 1; // used to test how many times this branch gets executed - Box::new(ok(Some(data.clone()))) + Ok(Some(data.clone())) } else { - Box::new(ok(None)) + Ok(None) } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut cache = self.cache.write(); cache.insert( idempotency_key, IdempotentData::new(status_code, data, input_hash), ); - Box::new(ok(())) + Ok(()) } } +#[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { let accounts: Vec = self .accounts .read() @@ -166,75 +165,77 @@ impl AccountStore for TestStore { }) .collect(); if accounts.len() == account_ids.len() { - Box::new(ok(accounts)) + Ok(accounts) } else { - Box::new(err(())) + Err(()) } } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } +#[async_trait] impl LeftoversStore for TestStore { type AccountId = Uuid; type AssetType = BigUint; - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, account_id: Uuid, uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send> { + ) -> Result<(), ()> { let mut guard = self.uncredited_settlement_amount.write(); if let Some(leftovers) = (*guard).get_mut(&account_id) { - if leftovers.1 > uncredited_settlement_amount.1 { - // the current leftovers maintain the scale so we just need to - // upscale the provided leftovers to the existing leftovers' scale - let scaled = uncredited_settlement_amount - .0 - .normalize_scale(ConvertDetails { - from: uncredited_settlement_amount.1, - to: leftovers.1, - }) - .unwrap(); - *leftovers = (leftovers.0.clone() + scaled, leftovers.1); - } else if leftovers.1 == uncredited_settlement_amount.1 { - *leftovers = ( - leftovers.0.clone() + uncredited_settlement_amount.0, - leftovers.1, - ); - } else { - // if the scale of the provided leftovers is bigger than - // existing scale then we update the scale of the leftovers' - // scale - let scaled = leftovers - .0 - .normalize_scale(ConvertDetails { - from: leftovers.1, - to: uncredited_settlement_amount.1, - }) - .unwrap(); - *leftovers = ( - uncredited_settlement_amount.0 + scaled, - uncredited_settlement_amount.1, - ); + match 
leftovers.1.cmp(&uncredited_settlement_amount.1) { + Ordering::Greater => { + // the current leftovers maintain the scale so we just need to + // upscale the provided leftovers to the existing leftovers' scale + let scaled = uncredited_settlement_amount + .0 + .normalize_scale(ConvertDetails { + from: uncredited_settlement_amount.1, + to: leftovers.1, + }) + .unwrap(); + *leftovers = (leftovers.0.clone() + scaled, leftovers.1); + } + Ordering::Equal => { + *leftovers = ( + leftovers.0.clone() + uncredited_settlement_amount.0, + leftovers.1, + ); + } + _ => { + // if the scale of the provided leftovers is bigger than + // existing scale then we update the scale of the leftovers' + // scale + let scaled = leftovers + .0 + .normalize_scale(ConvertDetails { + from: leftovers.1, + to: uncredited_settlement_amount.1, + }) + .unwrap(); + *leftovers = ( + uncredited_settlement_amount.0 + scaled, + uncredited_settlement_amount.1, + ); + } } } else { (*guard).insert(account_id, uncredited_settlement_amount); } - Box::new(ok(())) + Ok(()) } - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Uuid, local_scale: u8, - ) -> Box + Send> { + ) -> Result { let mut guard = self.uncredited_settlement_amount.write(); if let Some(l) = guard.get_mut(&account_id) { let ret = l.clone(); @@ -242,28 +243,25 @@ impl LeftoversStore for TestStore { scale_with_precision_loss(ret.0, local_scale, ret.1); // save the new leftovers *l = (leftover_precision_loss, std::cmp::max(local_scale, ret.1)); - Box::new(ok(scaled_leftover_amount)) + Ok(scaled_leftover_amount) } else { - Box::new(ok(BigUint::from(0u32))) + Ok(BigUint::from(0u32)) } } - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Uuid, - ) -> Box + Send> { + ) -> Result<(Self::AssetType, u8), ()> { let leftovers = self.uncredited_settlement_amount.read(); - Box::new(ok(if let Some(a) = leftovers.get(&account_id) { + Ok(if let Some(a) = leftovers.get(&account_id) { a.clone() } else { (BigUint::from(0u32), 1) - })) + }) } - fn clear_uncredited_settlement_amount( - &self, - _account_id: Uuid, - ) -> Box + Send> { + async fn clear_uncredited_settlement_amount(&self, _account_id: Uuid) -> Result<(), ()> { unreachable!() } } @@ -321,29 +319,16 @@ pub fn mock_message(status_code: usize) -> mockito::Mock { .with_body(BODY) } -// Futures helper taken from the store_helpers in interledger-store-redis. 
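With the store methods now async, the helper below is no longer needed: tests annotated with `#[tokio::test]` can await the store directly. A minimal sketch of the replacement pattern (hypothetical test name, reusing the `TestStore` and `TEST_ACCOUNT_0` fixtures from this module):

    #[tokio::test]
    async fn gets_account_without_block_on() {
        let store = TestStore::new(vec![TEST_ACCOUNT_0.clone()], false);
        let accounts = store.get_accounts(vec![TEST_ACCOUNT_0.id]).await.unwrap();
        assert_eq!(accounts[0].id(), TEST_ACCOUNT_0.id);
    }
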
-pub fn block_on(f: F) -> Result -where - F: Future + Send + 'static, - F::Item: Send, - F::Error: Send, -{ - // Only run one test at a time - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - runtime.block_on(f) -} - pub fn test_service( ) -> SettlementMessageService + Clone, TestAccount> { SettlementMessageService::new(incoming_service_fn(|_request| { - Box::new(err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other incoming handler!", data: &[], triggered_by: Some(&SERVICE_ADDRESS), } - .build())) + .build()) })) } @@ -359,21 +344,21 @@ pub fn test_api( should_fulfill: bool, ) -> warp::filters::BoxedFilter<(impl warp::Reply,)> { let outgoing = outgoing_service_fn(move |_| { - Box::new(if should_fulfill { - ok(FulfillBuilder { + if should_fulfill { + Ok(FulfillBuilder { fulfillment: &[0; 32], data: b"hello!", } .build()) } else { - err(RejectBuilder { + Err(RejectBuilder { code: ErrorCode::F02_UNREACHABLE, message: b"No other outgoing handler!", data: &[], triggered_by: Some(&SERVICE_ADDRESS), } .build()) - }) + } }); create_settlements_filter(test_store, outgoing) } diff --git a/crates/interledger-settlement/src/core/backends_common/redis/mod.rs b/crates/interledger-settlement/src/core/backends_common/redis/mod.rs index 172afc96e..9df6cac45 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/mod.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/mod.rs @@ -4,45 +4,59 @@ use crate::core::{ types::{Convert, ConvertDetails, LeftoversStore}, }; use bytes::Bytes; -use futures::{future::result, Future}; +use futures::TryFutureExt; use http::StatusCode; use num_bigint::BigUint; use redis_crate::{ - self, aio::SharedConnection, cmd, Client, ConnectionInfo, ErrorKind, FromRedisValue, - PipelineCommands, RedisError, RedisWrite, ToRedisArgs, Value, + self, aio::MultiplexedConnection, AsyncCommands, Client, ConnectionInfo, ErrorKind, + FromRedisValue, RedisError, RedisWrite, ToRedisArgs, Value, }; -use std::collections::HashMap as SlowHashMap; +use std::collections::HashMap; use std::str::FromStr; use log::{debug, error, trace}; +use async_trait::async_trait; + #[cfg(test)] mod test_helpers; +/// Domain separator for leftover amounts static UNCREDITED_AMOUNT_KEY: &str = "uncredited_engine_settlement_amount"; + +/// Helper function to get a redis key fn uncredited_amount_key(account_id: &str) -> String { format!("{}:{}", UNCREDITED_AMOUNT_KEY, account_id) } +/// Builder object to create a Redis connection for the engine pub struct EngineRedisStoreBuilder { redis_url: ConnectionInfo, } impl EngineRedisStoreBuilder { + /// Simple constructor pub fn new(redis_url: ConnectionInfo) -> Self { EngineRedisStoreBuilder { redis_url } } - pub fn connect(&self) -> impl Future { - result(Client::open(self.redis_url.clone())) - .map_err(|err| error!("Error creating Redis client: {:?}", err)) - .and_then(|client| { - debug!("Connected to redis: {:?}", client); - client - .get_shared_async_connection() - .map_err(|err| error!("Error connecting to Redis: {:?}", err)) - }) - .and_then(move |connection| Ok(EngineRedisStore { connection })) + /// Connects to the provided redis_url and returns a Redis connection for the Settlement Engine + pub async fn connect(&self) -> Result { + let client = match Client::open(self.redis_url.clone()) { + Ok(c) => c, + Err(err) => { + error!("Error creating Redis client: {:?}", err); + return Err(()); + } + }; + + let connection = client + .get_multiplexed_tokio_connection() + 
.map_err(|err| error!("Error connecting to Redis: {:?}", err)) + .await?; + debug!("Connected to redis: {:?}", client); + + Ok(EngineRedisStore { connection }) } } @@ -52,56 +66,54 @@ impl EngineRedisStoreBuilder { /// composed in the stores of other Settlement Engines. #[derive(Clone)] pub struct EngineRedisStore { - pub connection: SharedConnection, + pub connection: MultiplexedConnection, } +#[async_trait] impl IdempotentStore for EngineRedisStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let idempotency_key_clone = idempotency_key.clone(); - Box::new( - cmd("HGETALL") - .arg(idempotency_key.clone()) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error loading idempotency key {}: {:?}", - idempotency_key_clone, err - ) - }) - .and_then( - move |(_connection, ret): (_, SlowHashMap)| { - if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( - ret.get("status_code"), - ret.get("data"), - ret.get("input_hash"), - ) { - trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); - let mut input_hash: [u8; 32] = Default::default(); - input_hash.copy_from_slice(input_hash_slice.as_ref()); - Ok(Some(IdempotentData::new( - StatusCode::from_str(status_code).unwrap(), - Bytes::from(data.clone()), - input_hash, - ))) - } else { - Ok(None) - } - }, - ), - ) + let mut connection = self.connection.clone(); + let ret: HashMap = connection + .hgetall(idempotency_key.clone()) + .map_err(move |err| { + error!( + "Error loading idempotency key {}: {:?}", + idempotency_key_clone, err + ) + }) + .await?; + + if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( + ret.get("status_code"), + ret.get("data"), + ret.get("input_hash"), + ) { + trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); + let mut input_hash: [u8; 32] = Default::default(); + input_hash.copy_from_slice(input_hash_slice.as_ref()); + Ok(Some(IdempotentData::new( + StatusCode::from_str(status_code).unwrap(), + Bytes::from(data.clone()), + input_hash, + ))) + } else { + Ok(None) + } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut pipe = redis_crate::pipe(); + let mut connection = self.connection.clone(); pipe.atomic() .cmd("HMSET") // cannot use hset_multiple since data and status_code have different types .arg(&idempotency_key) @@ -114,22 +126,20 @@ impl IdempotentStore for EngineRedisStore { .ignore() .expire(&idempotency_key, 86400) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error caching: {:?}", err)) - .and_then(move |(_connection, _): (_, Vec)| { - trace!( - "Cached {:?}: {:?}, {:?}", - idempotency_key, - status_code, - data, - ); - Ok(()) - }), - ) + pipe.query_async(&mut connection) + .map_err(|err| error!("Error caching: {:?}", err)) + .await?; + trace!( + "Cached {:?}: {:?}, {:?}", + idempotency_key, + status_code, + data, + ); + Ok(()) } } +/// Helper datatype for storing and loading quantities of a number with different scales #[derive(Debug, Clone)] struct AmountWithScale { num: BigUint, @@ -149,11 +159,11 @@ impl ToRedisArgs for AmountWithScale { } impl AmountWithScale { + /// Iterates over all values because in this case it's making + /// an lrange call. 
This returns all the tuple elements in 1 array, and + /// it cannot differentiate between 1 AmountWithScale value or multiple + /// ones. This looks like a limitation of redis.rs fn parse_multi_values(items: &[Value]) -> Option { - // We have to iterate over all values because in this case we're making - // an lrange call. This returns all the tuple elements in 1 array, and - // it cannot differentiate between 1 AmountWithScale value or multiple - // ones. This looks like a limitation of redis.rs let len = items.len(); let mut iter = items.iter(); @@ -216,151 +226,138 @@ impl FromRedisValue for AmountWithScale { } } +#[async_trait] impl LeftoversStore for EngineRedisStore { type AccountId = String; type AssetType = BigUint; - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send> { - Box::new( - cmd("LRANGE") - .arg(uncredited_amount_key(&account_id)) - .arg(0) - .arg(-1) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) - .and_then(move |(_, amount): (_, AmountWithScale)| Ok((amount.num, amount.scale))), - ) + ) -> Result<(Self::AssetType, u8), ()> { + let mut connection = self.connection.clone(); + let amount: AmountWithScale = connection + .lrange(uncredited_amount_key(&account_id), 0, -1) + .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) + .await?; + Ok((amount.num, amount.scale)) } - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, account_id: Self::AccountId, uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send> { + ) -> Result<(), ()> { trace!( "Saving uncredited_settlement_amount {:?} {:?}", account_id, uncredited_settlement_amount ); - Box::new( - // We store these amounts as lists of strings - // because we cannot do BigNumber arithmetic in the store - // When loading the amounts, we convert them to the appropriate data - // type and sum them up. - cmd("RPUSH") - .arg(uncredited_amount_key(&account_id)) - .arg(AmountWithScale { + // We store these amounts as lists of strings + // because we cannot do BigNumber arithmetic in the store + // When loading the amounts, we convert them to the appropriate data + // type and sum them up. 
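Put concretely, each save pushes one (amount, scale) pair onto a Redis list keyed by `uncredited_amount_key`, and loading folds the whole list back into a single `AmountWithScale`. A rough round-trip sketch (assuming, as the in-memory TestStore above does, that entries are normalised to the largest scale seen before being summed):

    use num_bigint::BigUint;

    // Two engine notifications with different scales for the same account:
    store
        .save_uncredited_settlement_amount("alice".to_string(), (BigUint::from(5u32), 11))
        .await
        .unwrap();
    store
        .save_uncredited_settlement_amount("alice".to_string(), (BigUint::from(855u32), 12))
        .await
        .unwrap();

    // The list under "uncredited_engine_settlement_amount:alice" now holds
    // both pairs, and reading them back sums at the larger scale:
    // 5 * 10 + 855 = 905 at scale 12.
    let leftovers = store
        .get_uncredited_settlement_amount("alice".to_string())
        .await
        .unwrap();
    assert_eq!(leftovers, (BigUint::from(905u32), 12));
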
+ let mut connection = self.connection.clone(); + connection + .rpush( + uncredited_amount_key(&account_id), + AmountWithScale { num: uncredited_settlement_amount.0, scale: uncredited_settlement_amount.1, - }) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + }, + ) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(()) } - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Self::AccountId, local_scale: u8, - ) -> Box + Send> { + ) -> Result { let connection = self.connection.clone(); trace!("Loading uncredited_settlement_amount {:?}", account_id); - Box::new( - self.get_uncredited_settlement_amount(account_id.clone()) - .and_then(move |amount| { - // scale the amount from the max scale to the local scale, and then - // save any potential leftovers to the store - let (scaled_amount, precision_loss) = - scale_with_precision_loss(amount.0, local_scale, amount.1); - let mut pipe = redis_crate::pipe(); - pipe.atomic(); - pipe.del(uncredited_amount_key(&account_id)).ignore(); - pipe.rpush( - uncredited_amount_key(&account_id), - AmountWithScale { - num: precision_loss, - scale: std::cmp::max(local_scale, amount.1), - }, - ) - .ignore(); - - pipe.query_async(connection.clone()) - .map_err(move |err| { - error!("Error saving uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(scaled_amount)) - }), + let amount = self + .get_uncredited_settlement_amount(account_id.clone()) + .await?; + // scale the amount from the max scale to the local scale, and then + // save any potential leftovers to the store + let (scaled_amount, precision_loss) = + scale_with_precision_loss(amount.0, local_scale, amount.1); + + let mut pipe = redis_crate::pipe(); + pipe.atomic(); + pipe.del(uncredited_amount_key(&account_id)).ignore(); + pipe.rpush( + uncredited_amount_key(&account_id), + AmountWithScale { + num: precision_loss, + scale: std::cmp::max(local_scale, amount.1), + }, ) + .ignore(); + + pipe.query_async(&mut connection.clone()) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(scaled_amount) } - fn clear_uncredited_settlement_amount( + async fn clear_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send> { + ) -> Result<(), ()> { trace!("Clearing uncredited_settlement_amount {:?}", account_id,); - Box::new( - cmd("DEL") - .arg(uncredited_amount_key(&account_id)) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!("Error clearing uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + let mut connection = self.connection.clone(); + connection + .del(uncredited_amount_key(&account_id)) + .map_err(move |err| error!("Error clearing uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(()) } } #[cfg(test)] mod tests { use super::*; - use test_helpers::{block_on, test_store, IDEMPOTENCY_KEY}; + use test_helpers::{test_store, IDEMPOTENCY_KEY}; mod idempotency { use super::*; - #[test] - fn saves_and_loads_idempotency_key_data_properly() { - block_on(test_store().and_then(|(store, context)| { - let input_hash: [u8; 32] = Default::default(); - store - .save_idempotent_data( - IDEMPOTENCY_KEY.clone(), - input_hash, - StatusCode::OK, - Bytes::from("TEST"), - ) - .map_err(|err| 
eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .load_idempotent_data(IDEMPOTENCY_KEY.clone()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data1| { - assert_eq!( - data1.unwrap(), - IdempotentData::new( - StatusCode::OK, - Bytes::from("TEST"), - input_hash - ) - ); - let _ = context; - - store - .load_idempotent_data("asdf".to_string()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data2| { - assert!(data2.is_none()); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() + #[tokio::test] + async fn saves_and_loads_idempotency_key_data_properly() { + // The context must be loaded into scope + let (store, _context) = test_store().await.unwrap(); + let input_hash: [u8; 32] = Default::default(); + store + .save_idempotent_data( + IDEMPOTENCY_KEY.clone(), + input_hash, + StatusCode::OK, + Bytes::from("TEST"), + ) + .await + .unwrap(); + + let data1 = store + .load_idempotent_data(IDEMPOTENCY_KEY.clone()) + .await + .unwrap(); + assert_eq!( + data1.unwrap(), + IdempotentData::new(StatusCode::OK, Bytes::from("TEST"), input_hash) + ); + + let data2 = store + .load_idempotent_data("asdf".to_string()) + .await + .unwrap(); + assert!(data2.is_none()); } } } diff --git a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs index df4750134..473ea26d8 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/mod.rs @@ -3,4 +3,4 @@ mod redis_helpers; #[cfg(test)] mod store_helpers; #[cfg(test)] -pub use store_helpers::{block_on, test_store, IDEMPOTENCY_KEY}; +pub use store_helpers::{test_store, IDEMPOTENCY_KEY}; diff --git a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs index 44ae632ef..4bdb63bba 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/redis_helpers.rs @@ -1,7 +1,7 @@ // Copied from https://github.com/mitsuhiko/redis-rs/blob/9a1777e8a90c82c315a481cdf66beb7d69e681a2/tests/support/mod.rs #![allow(dead_code)] -use futures::Future; +use futures::TryFutureExt; use redis_crate::{self as redis, RedisError}; use std::{env, fs, path::PathBuf, process, thread::sleep, time::Duration}; @@ -155,19 +155,20 @@ impl TestContext { self.client.get_connection().unwrap() } - pub fn async_connection(&self) -> impl Future { + pub async fn async_connection(&self) -> Result { self.client .get_async_connection() .map_err(|err| panic!(err)) + .await } pub fn stop_server(&mut self) { self.server.stop(); } - pub fn shared_async_connection( + pub async fn shared_async_connection( &self, - ) -> impl Future { - self.client.get_shared_async_connection() + ) -> Result { + self.client.get_multiplexed_tokio_connection().await } } diff --git a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs index f4305d907..ba3d96ebe 100644 --- a/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs +++ b/crates/interledger-settlement/src/core/backends_common/redis/test_helpers/store_helpers.rs @@ -1,31 
+1,16 @@ use super::redis_helpers::TestContext; use crate::core::backends_common::redis::{EngineRedisStore, EngineRedisStoreBuilder}; -use env_logger; -use futures::Future; -use tokio::runtime::Runtime; - use lazy_static::lazy_static; lazy_static! { pub static ref IDEMPOTENCY_KEY: String = String::from("abcd"); } -pub fn test_store() -> impl Future { +pub async fn test_store() -> Result<(EngineRedisStore, TestContext), ()> { let context = TestContext::new(); - EngineRedisStoreBuilder::new(context.get_client_connection_info()) + let store = EngineRedisStoreBuilder::new(context.get_client_connection_info()) .connect() - .and_then(|store| Ok((store, context))) -} - -pub fn block_on(f: F) -> Result -where - F: Future + Send + 'static, - F::Item: Send, - F::Error: Send, -{ - // Only run one test at a time - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - runtime.block_on(f) + .await?; + Ok((store, context)) } diff --git a/crates/interledger-settlement/src/core/engines_api.rs b/crates/interledger-settlement/src/core/engines_api.rs index f6e57db73..8d7143ce8 100644 --- a/crates/interledger-settlement/src/core/engines_api.rs +++ b/crates/interledger-settlement/src/core/engines_api.rs @@ -7,22 +7,142 @@ use super::{ idempotency::{make_idempotent_call, IdempotentStore}, types::{Quantity, SettlementEngine}, }; -use bytes::buf::FromBuf; use bytes::Bytes; -use futures::Future; use http::StatusCode; use hyper::Response; use interledger_http::error::default_rejection_handler; - use serde::{Deserialize, Serialize}; - -use warp::{self, reject::Rejection, Filter}; +use warp::Filter; #[derive(Serialize, Deserialize, Debug, Clone, Hash)] pub struct CreateAccount { id: String, } +/// Makes an idempotent call to [`engine.create_account`](../types/trait.SettlementEngine.html#tymethod.create_account) +/// Returns `Status Code 201` +async fn create_engine_account( + idempotency_key: Option, + account_id: CreateAccount, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input_hash = get_hash_of(account_id.id.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + engine.create_account(account_id.id), + input_hash, + idempotency_key, + StatusCode::CREATED, + Bytes::from("CREATED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + .body(message) + .unwrap()) +} + +/// Makes an idempotent call to [`engine.delete_account`](../types/trait.SettlementEngine.html#tymethod.delete_account) +/// Returns Status Code `204`. 
+async fn delete_engine_account( + account_id: String, + idempotency_key: Option, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input_hash = get_hash_of(account_id.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + engine.delete_account(account_id), + input_hash, + idempotency_key, + StatusCode::NO_CONTENT, + Bytes::from("DELETED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + .body(message) + .unwrap()) +} + +/// Makes an idempotent call to [`engine.send_money`](../types/trait.SettlementEngine.html#tymethod.send_money) +/// Returns Status Code `201` +async fn engine_send_money( + id: String, + idempotency_key: Option, + quantity: Quantity, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input = format!("{}{:?}", id, quantity); + let input_hash = get_hash_of(input.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + engine.send_money(id, quantity), + input_hash, + idempotency_key, + StatusCode::CREATED, + Bytes::from("EXECUTED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + .body(message) + .unwrap()) +} + +/// Makes an idempotent call to [`engine.receive_message`](../types/trait.SettlementEngine.html#tymethod.receive_message) +/// Returns Status Code `201` +async fn engine_receive_message( + id: String, + idempotency_key: Option, + message: Bytes, + engine: E, + store: S, +) -> Result +where + E: SettlementEngine + Clone + Send + Sync + 'static, + S: IdempotentStore + Clone + Send + Sync + 'static, +{ + let input = format!("{}{:?}", id, message); + let input_hash = get_hash_of(input.as_ref()); + let (status_code, message) = make_idempotent_call( + store, + engine.receive_message(id, message.to_vec()), + input_hash, + idempotency_key, + StatusCode::CREATED, + Bytes::from("RECEIVED"), + ) + .await?; + + Ok(Response::builder() + .header("Content-Type", "application/json") + .status(status_code) + .body(message) + .unwrap()) +} + /// Returns a Settlement Engine filter which exposes a Warp-compatible /// idempotent API which forwards calls to the provided settlement engine which /// uses the underlying store for idempotency. 
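For orientation, a settlement engine binary would typically mount the filter returned below with warp; a minimal sketch, assuming hypothetical `engine` and `store` values implementing `SettlementEngine` and `IdempotentStore`, run from an async main, with an arbitrarily chosen address and port:

    let api = create_settlement_engine_filter(engine, store);
    // Exposes POST /accounts, DELETE /accounts/:id,
    // POST /accounts/:id/settlements and POST /accounts/:id/messages.
    warp::serve(api).run(([127, 0, 0, 1], 3000)).await;
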
@@ -37,161 +157,51 @@ where let with_store = warp::any().map(move || store.clone()).boxed(); let with_engine = warp::any().map(move || engine.clone()).boxed(); let idempotency = warp::header::optional::("idempotency-key"); - let account_id = warp::path("accounts").and(warp::path::param2::()); // account_id + let account_id = warp::path("accounts").and(warp::path::param::()); // account_id // POST /accounts/ (optional idempotency-key header) // Body is a Vec object - let accounts = warp::post2() + let accounts = warp::post() .and(warp::path("accounts")) .and(warp::path::end()) .and(idempotency) .and(warp::body::json()) .and(with_engine.clone()) .and(with_store.clone()) - .and_then( - move |idempotency_key: Option, - account_id: CreateAccount, - engine: E, - store: S| { - let account_id = account_id.id; - let input_hash = get_hash_of(account_id.as_ref()); - - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let create_account_fn = move || engine.create_account(account_id); - make_idempotent_call( - store, - create_account_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - Bytes::from("CREATED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and_then(create_engine_account); // DELETE /accounts/:id (optional idempotency-key header) - let del_account = warp::delete2() + let del_account = warp::delete() .and(account_id) .and(warp::path::end()) .and(idempotency) .and(with_engine.clone()) .and(with_store.clone()) - .and_then( - move |id: String, idempotency_key: Option, engine: E, store: S| { - let input_hash = get_hash_of(id.as_ref()); - - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let delete_account_fn = move || engine.delete_account(id); - make_idempotent_call( - store, - delete_account_fn, - input_hash, - idempotency_key, - StatusCode::NO_CONTENT, - Bytes::from("DELETED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); - - // POST /accounts/:account_id/settlements (optional idempotency-key header) + .and_then(delete_engine_account); + + // POST /accounts/:aVcount_id/settlements (optional idempotency-key header) // Body is a Quantity object let settlement_endpoint = account_id.and(warp::path("settlements")); - let settlements = warp::post2() + let settlements = warp::post() .and(settlement_endpoint) .and(warp::path::end()) .and(idempotency) .and(warp::body::json()) .and(with_engine.clone()) .and(with_store.clone()) - .and_then( - move |id: String, - idempotency_key: Option, - quantity: Quantity, - engine: E, - store: S| { - let input = format!("{}{:?}", id, quantity); - let input_hash = get_hash_of(input.as_ref()); - let send_money_fn = move || engine.send_money(id, quantity); - make_idempotent_call( - store, - send_money_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - Bytes::from("EXECUTED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and_then(engine_send_money); // POST /accounts/:account_id/messages 
(optional idempotency-key header) // Body is a Vec object let messages_endpoint = account_id.and(warp::path("messages")); - let messages = warp::post2() + let messages = warp::post() .and(messages_endpoint) .and(warp::path::end()) .and(idempotency) - .and(warp::body::concat()) - .and(with_engine.clone()) - .and(with_store.clone()) - .and_then( - move |id: String, - idempotency_key: Option, - body: warp::body::FullBody, - engine: E, - store: S| { - // Gets called by our settlement engine, forwards the request outwards - // until it reaches the peer's settlement engine. - let message = Vec::from_buf(body); - let input = format!("{}{:?}", id, message); - let input_hash = get_hash_of(input.as_ref()); - - // Wrap do_send_outgoing_message in a closure to be invoked by - // the idempotency wrapper - let receive_message_fn = move || engine.receive_message(id, message); - make_idempotent_call( - store, - receive_message_fn, - input_hash, - idempotency_key, - StatusCode::CREATED, - Bytes::from("RECEIVED"), - ) - .map_err::<_, Rejection>(move |err| err.into()) - .and_then(move |(status_code, message)| { - Ok(Response::builder() - .header("Content-Type", "application/json") - .status(status_code) - .body(message) - .unwrap()) - }) - }, - ); + .and(warp::body::bytes()) + .and(with_engine) + .and(with_store) + .and_then(engine_receive_message); accounts .or(del_account) @@ -205,11 +215,10 @@ where mod tests { use super::*; use crate::core::idempotency::IdempotentData; - use crate::core::types::ApiResponse; + use crate::core::types::{ApiResponse, ApiResult}; + use async_trait::async_trait; use bytes::Bytes; - use futures::future::ok; use http::StatusCode; - use interledger_http::error::ApiError; use parking_lot::RwLock; use serde_json::{json, Value}; use std::collections::HashMap; @@ -242,78 +251,66 @@ mod tests { } } + #[async_trait] impl IdempotentStore for TestStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let cache = self.cache.read(); if let Some(data) = cache.get(&idempotency_key) { let mut guard = self.cache_hits.write(); *guard += 1; // used to test how many times this branch gets executed - Box::new(ok(Some(data.clone()))) + Ok(Some(data.clone())) } else { - Box::new(ok(None)) + Ok(None) } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut cache = self.cache.write(); cache.insert( idempotency_key, IdempotentData::new(status_code, data, input_hash), ); - Box::new(ok(())) + Ok(()) } } pub static IDEMPOTENCY: &str = "abcd01234"; + #[async_trait] impl SettlementEngine for TestEngine { - fn send_money( - &self, - _account_id: String, - _money: Quantity, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn send_money(&self, _account_id: String, _money: Quantity) -> ApiResult { + Ok(ApiResponse::Default) } - fn receive_message( - &self, - _account_id: String, - _message: Vec, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn receive_message(&self, _account_id: String, _message: Vec) -> ApiResult { + Ok(ApiResponse::Default) } - fn create_account( - &self, - _account_id: String, - ) -> Box + Send> { - Box::new(ok(ApiResponse::Default)) + async fn create_account(&self, _account_id: String) -> ApiResult { + Ok(ApiResponse::Default) } - fn delete_account( - &self, - _account_id: String, - ) -> Box + Send> { - 
Box::new(ok(ApiResponse::Default)) + async fn delete_account(&self, _account_id: String) -> ApiResult { + Ok(ApiResponse::Default) } } - #[test] - fn idempotent_execute_settlement() { + #[tokio::test] + async fn idempotent_execute_settlement() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let settlement_call = move |id, amount, scale| { + let settlement_call = |id, amount, scale| { warp::test::request() .method("POST") .path(&format!("/accounts/{}/settlements", id)) @@ -322,25 +319,25 @@ mod tests { .reply(&api) }; - let ret = settlement_call("1".to_owned(), 100, 6); + let ret = settlement_call("1".to_owned(), 100, 6).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), "EXECUTED"); // is idempotent - let ret = settlement_call("1".to_owned(), 100, 6); + let ret = settlement_call("1".to_owned(), 100, 6).await; assert_eq!(ret.status(), StatusCode::CREATED); assert_eq!(ret.body(), "EXECUTED"); - // // fails with different id and same data - let ret = settlement_call("42".to_owned(), 100, 6); + // fails with different id and same data + let ret = settlement_call("42".to_owned(), 100, 6).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with same id and different data - let ret = settlement_call("1".to_owned(), 42, 6); + let ret = settlement_call("1".to_owned(), 42, 6).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with different id and different data - let ret = settlement_call("42".to_owned(), 42, 6); + let ret = settlement_call("42".to_owned(), 42, 6).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); @@ -352,13 +349,13 @@ mod tests { assert_eq!(cached_data.body, "EXECUTED".to_string()); } - #[test] - fn idempotent_receive_message() { + #[tokio::test] + async fn idempotent_receive_message() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let messages_call = move |id, msg| { + let messages_call = |id, msg| { warp::test::request() .method("POST") .path(&format!("/accounts/{}/messages", id)) @@ -367,25 +364,25 @@ mod tests { .reply(&api) }; - let ret = messages_call("1", vec![0]); + let ret = messages_call("1", vec![0]).await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "RECEIVED"); // is idempotent - let ret = messages_call("1", vec![0]); + let ret = messages_call("1", vec![0]).await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "RECEIVED"); - // // fails with different id and same data - let ret = messages_call("42", vec![0]); + // fails with different id and same data + let ret = messages_call("42", vec![0]).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with same id and different data - let ret = messages_call("1", vec![42]); + let ret = messages_call("1", vec![42]).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); // fails with different id and different data - let ret = messages_call("42", vec![42]); + let ret = messages_call("42", vec![42]).await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); @@ -397,13 +394,13 @@ mod tests { assert_eq!(cached_data.body, 
"RECEIVED".to_string()); } - #[test] - fn idempotent_create_account() { + #[tokio::test] + async fn idempotent_create_account() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let create_account_call = move |id: &str| { + let create_account_call = |id: &str| { warp::test::request() .method("POST") .path("/accounts") @@ -412,17 +409,17 @@ mod tests { .reply(&api) }; - let ret = create_account_call("1"); + let ret = create_account_call("1").await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "CREATED"); // is idempotent - let ret = create_account_call("1"); + let ret = create_account_call("1").await; assert_eq!(ret.status().as_u16(), StatusCode::CREATED); assert_eq!(ret.body(), "CREATED"); // fails with different id - let ret = create_account_call("42"); + let ret = create_account_call("42").await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); @@ -434,13 +431,13 @@ mod tests { assert_eq!(cached_data.body, "CREATED".to_string()); } - #[test] - fn idempotent_delete_account() { + #[tokio::test] + async fn idempotent_delete_account() { let store = test_store(); let engine = TestEngine; let api = create_settlement_engine_filter(engine, store.clone()); - let delete_account_call = move |id: &str| { + let delete_account_call = |id: &str| { warp::test::request() .method("DELETE") .path(&format!("/accounts/{}", id)) @@ -448,17 +445,17 @@ mod tests { .reply(&api) }; - let ret = delete_account_call("1"); + let ret = delete_account_call("1").await; assert_eq!(ret.status(), StatusCode::NO_CONTENT); assert_eq!(ret.body(), "DELETED"); // is idempotent - let ret = delete_account_call("1"); + let ret = delete_account_call("1").await; assert_eq!(ret.status(), StatusCode::NO_CONTENT); assert_eq!(ret.body(), "DELETED"); // fails with different id - let ret = delete_account_call("42"); + let ret = delete_account_call("42").await; check_error_status_and_message(ret, 409, "Provided idempotency key is tied to other input"); let cache = store.cache.read(); diff --git a/crates/interledger-settlement/src/core/idempotency.rs b/crates/interledger-settlement/src/core/idempotency.rs index f79972577..7f8ebea4c 100644 --- a/crates/interledger-settlement/src/core/idempotency.rs +++ b/crates/interledger-settlement/src/core/idempotency.rs @@ -1,22 +1,25 @@ -use super::types::ApiResponse; +use super::types::{ApiResponse, ApiResult}; +use async_trait::async_trait; use bytes::Bytes; -use futures::executor::spawn; -use futures::{ - future::{err, ok, Either}, - Future, -}; +use futures::Future; +use futures::TryFutureExt; use http::StatusCode; use interledger_http::error::*; use log::error; +/// Data stored for the idempotency features #[derive(Debug, Clone, PartialEq)] pub struct IdempotentData { + /// The HTTP Status Code of the API's response pub status: StatusCode, + /// The body of the API's response pub body: Bytes, + /// The hash of the serialized input parameters which generated the API's response pub input_hash: [u8; 32], } impl IdempotentData { + /// Simple constructor pub fn new(status: StatusCode, body: Bytes, input_hash: [u8; 32]) -> Self { Self { status, @@ -26,64 +29,67 @@ impl IdempotentData { } } +/// Store trait which should be implemented for idempotency related features +#[async_trait] pub trait IdempotentStore { /// Returns the API response that was saved when the idempotency key was used /// Also returns a hash of the input data 
which resulted in the response - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send>; + ) -> Result, ()>; /// Saves the data that was passed along with the api request for later - /// The store MUST also save a hash of the input, so that it errors out on requests - fn save_idempotent_data( + /// The store also saves the hash of the input, so that it errors out on requests + /// with conflicting input hashes for the same idempotency key + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send>; + ) -> Result<(), ()>; } -// Helper function that returns any idempotent data that corresponds to a -// provided idempotency key. It fails if the hash of the input that -// generated the idempotent data does not match the hash of the provided input. -fn check_idempotency( +/// Helper function that returns any idempotent data that corresponds to a +/// provided idempotency key. It fails if the hash of the input that +/// generated the idempotent data does not match the hash of the provided input. +async fn check_idempotency( store: S, idempotency_key: String, input_hash: [u8; 32], -) -> impl Future, Error = ApiError> +) -> Result, ApiError> where S: IdempotentStore + Clone + Send + Sync + 'static, { - store + let ret: Option = store .load_idempotent_data(idempotency_key.clone()) .map_err(move |_| IDEMPOTENT_STORE_CALL_ERROR.clone()) - .and_then(move |ret: Option| { - if let Some(ret) = ret { - // Check if the hash (ret.2) of the loaded idempotent data matches the hash - // of the provided input data. If not, we should error out since - // the caller provided an idempotency key that was used for a - // different input. - if ret.input_hash == input_hash { - Ok(Some((ret.status, ret.body))) - } else { - Ok(Some(( - StatusCode::from_u16(409).unwrap(), - Bytes::from(IDEMPOTENCY_CONFLICT_ERR), - ))) - } - } else { - Ok(None) - } - }) + .await?; + + if let Some(ret) = ret { + // Check if the hash (ret.2) of the loaded idempotent data matches the hash + // of the provided input data. If not, we should error out since + // the caller provided an idempotency key that was used for a + // different input. 
+ if ret.input_hash == input_hash { + Ok(Some((ret.status, ret.body))) + } else { + Ok(Some(( + StatusCode::from_u16(409).unwrap(), + Bytes::from(IDEMPOTENCY_CONFLICT_ERR), + ))) + } + } else { + Ok(None) + } } // make_idempotent_call takes a function instead of direct arguments so that we // can reuse it for both the messages and the settlements calls -pub fn make_idempotent_call( +pub async fn make_idempotent_call( store: S, - non_idempotent_function: F, + non_idempotent_function: impl Future, input_hash: [u8; 32], idempotency_key: Option, // As per the spec, the success status code is independent of the @@ -91,88 +97,78 @@ pub fn make_idempotent_call( status_code: StatusCode, // The default value is used when the engine returns a default return type default_return_value: Bytes, -) -> impl Future +) -> Result<(StatusCode, Bytes), ApiError> where - F: FnOnce() -> Box + Send>, S: IdempotentStore + Clone + Send + Sync + 'static, { if let Some(idempotency_key) = idempotency_key { // If there an idempotency key was provided, check idempotency - // and the key was not present or conflicting with an existing - // key, perform the call and save the idempotent return data - Either::A( - check_idempotency(store.clone(), idempotency_key.clone(), input_hash).and_then( - move |ret: Option<(StatusCode, Bytes)>| { - if let Some(ret) = ret { - if ret.0.is_success() { - Either::A(Either::A(ok((ret.0, ret.1)))) - } else { - let err_msg = ApiErrorType { - r#type: &ProblemType::Default, - status: ret.0, - title: "Idempotency Error", - }; - // if check_idempotency returns an error, then it - // has to be an idempotency error - let ret_error = ApiError::from_api_error_type(&err_msg) - .detail(String::from_utf8_lossy(&ret.1).to_string()); - Either::A(Either::B(err(ret_error))) + match check_idempotency(store.clone(), idempotency_key.clone(), input_hash).await? { + Some(ret) => { + if ret.0.is_success() { + // Return an OK response if the idempotent call was successful + Ok((ret.0, ret.1)) + } else { + // Return an HTTP Error otherwise + let err_msg = ApiErrorType { + r#type: &ProblemType::Default, + status: ret.0, + title: "Idempotency Error", + }; + // if check_idempotency returns an error, then it + // has to be an idempotency error + let ret_error = ApiError::from_api_error_type(&err_msg) + .detail(String::from_utf8_lossy(&ret.1).to_string()); + Err(ret_error) + } + } + None => { + // If there was no previous entry, make the idempotent call and save it + // Note: The error is also saved idempotently + let ret = match non_idempotent_function.await { + Ok(r) => r, + Err(ret) => { + let status_code = ret.status; + let data = Bytes::from(ret.detail.clone().unwrap_or_default()); + if store + .save_idempotent_data(idempotency_key, input_hash, status_code, data) + .await + .is_err() + { + // Should we be panicking here instead? + error!("Failed to connect to the store! The request will not be idempotent if retried.") } - } else { - Either::B( - non_idempotent_function().map_err({ - let store = store.clone(); - let idempotency_key = idempotency_key.clone(); - move |ret: ApiError| { - let status_code = ret.status; - let data = Bytes::from(ret.detail.clone().unwrap_or_default()); - spawn(store.save_idempotent_data( - idempotency_key, - input_hash, - status_code, - data, - ).map_err(move |_| error!("Failed to connect to the store! 
The request will not be idempotent if retried."))); - ret - }}) - .map(move |ret| { - let data = match ret { - ApiResponse::Default => default_return_value, - ApiResponse::Data(d) => d, - }; - (status_code, data) - }).and_then( - move |ret: (StatusCode, Bytes)| { - store - .save_idempotent_data( - idempotency_key, - input_hash, - ret.0, - ret.1.clone(), - ) - .map_err(move |_| { - error!("Failed to connect to the store! The request will not be idempotent if retried."); - IDEMPOTENT_STORE_CALL_ERROR.clone() - }) - .and_then(move |_| Ok((ret.0, ret.1))) - }, - ), - ) + return Err(ret); } - }, - ), - ) + }; + + let data = match ret { + ApiResponse::Default => default_return_value, + ApiResponse::Data(d) => d, + }; + // TODO refactor for readability, can unify the 2 idempotency calls, the error and the data + // are both Bytes + store + .save_idempotent_data( + idempotency_key, + input_hash, + status_code, + data.clone(), + ) + .map_err(move |_| { + error!("Failed to connect to the store! The request will not be idempotent if retried."); + IDEMPOTENT_STORE_CALL_ERROR.clone() + }).await?; + + Ok((status_code, data)) + } + } } else { // otherwise just make the call w/o any idempotency saves - Either::B( - non_idempotent_function() - .map(move |ret| { - let data = match ret { - ApiResponse::Default => default_return_value, - ApiResponse::Data(d) => d, - }; - (status_code, data) - }) - .and_then(move |ret: (StatusCode, Bytes)| Ok((ret.0, ret.1))), - ) + let data = match non_idempotent_function.await? { + ApiResponse::Default => default_return_value, + ApiResponse::Data(d) => d, + }; + Ok((status_code, data)) } } diff --git a/crates/interledger-settlement/src/core/mod.rs b/crates/interledger-settlement/src/core/mod.rs index 29fca3b2b..58679fc53 100644 --- a/crates/interledger-settlement/src/core/mod.rs +++ b/crates/interledger-settlement/src/core/mod.rs @@ -2,10 +2,14 @@ /// Only exported if the `backends_common` feature flag is enabled #[cfg(feature = "backends_common")] pub mod backends_common; -/// The REST API for the settlement engines + +/// Web service which exposes settlement related endpoints as described in the [RFC](https://interledger.org/rfcs/0038-settlement-engines/), +/// All endpoints are idempotent. pub mod engines_api; + /// Expose useful utilities for implementing idempotent functionalities pub mod idempotency; + /// Expose useful traits pub mod types; @@ -14,6 +18,27 @@ use num_traits::Zero; use ring::digest::{digest, SHA256}; use types::{Convert, ConvertDetails}; +/// Converts a number from a precision to another while taking precision loss into account +/// +/// # Examples +/// ```rust +/// # use num_bigint::BigUint; +/// # use interledger_settlement::core::scale_with_precision_loss; +/// assert_eq!( +/// scale_with_precision_loss(BigUint::from(905u32), 9, 11), +/// (BigUint::from(9u32), BigUint::from(5u32)) +/// ); +/// +/// assert_eq!( +/// scale_with_precision_loss(BigUint::from(8053u32), 9, 12), +/// (BigUint::from(8u32), BigUint::from(53u32)) +/// ); +/// +/// assert_eq!( +/// scale_with_precision_loss(BigUint::from(1u32), 9, 6), +/// (BigUint::from(1000u32), BigUint::from(0u32)) +/// ); +/// ``` pub fn scale_with_precision_loss( amount: BigUint, local_scale: u8, @@ -49,9 +74,7 @@ pub fn scale_with_precision_loss( } } -// Helper function that returns any idempotent data that corresponds to a -// provided idempotency key. It fails if the hash of the input that -// generated the idempotent data does not match the hash of the provided input. 
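As a point of reference (this sketch is not part of the patch): the extracted warp handlers referenced earlier, such as engine_send_money, are expected to combine the get_hash_of helper documented just below with the new make_idempotent_call above, passing the engine's future directly instead of a closure that returns a boxed future. The handler name, imports, and exact return plumbing here are assumptions inferred from the signatures in this diff.

use bytes::Bytes;
use http::StatusCode;
use interledger_http::error::ApiError;
use interledger_settlement::core::get_hash_of;
use interledger_settlement::core::idempotency::{make_idempotent_call, IdempotentStore};
use interledger_settlement::core::types::{Quantity, SettlementEngine};

// Hypothetical handler sketch: hash the request inputs, then run the engine call
// through the idempotency wrapper and await the (status, body) pair it resolves to.
async fn engine_send_money_sketch<E, S>(
    id: String,
    idempotency_key: Option<String>,
    quantity: Quantity,
    engine: E,
    store: S,
) -> Result<(StatusCode, Bytes), ApiError>
where
    E: SettlementEngine,
    S: IdempotentStore + Clone + Send + Sync + 'static,
{
    // Hash the serialized inputs so that reuse of an idempotency key with
    // different parameters is detected as a conflict.
    let input = format!("{}{:?}", id, quantity);
    let input_hash = get_hash_of(input.as_ref());
    make_idempotent_call(
        store,
        engine.send_money(id, quantity), // the future itself, no longer a boxed-future closure
        input_hash,
        idempotency_key,
        StatusCode::CREATED,
        Bytes::from("EXECUTED"),
    )
    .await
}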
+/// Returns the 32-bytes SHA256 hash of the provided preimage pub fn get_hash_of(preimage: &[u8]) -> [u8; 32] { let mut hash = [0; 32]; hash.copy_from_slice(digest(&SHA256, preimage).as_ref()); diff --git a/crates/interledger-settlement/src/core/types.rs b/crates/interledger-settlement/src/core/types.rs index 4168a0805..347460574 100644 --- a/crates/interledger-settlement/src/core/types.rs +++ b/crates/interledger-settlement/src/core/types.rs @@ -1,5 +1,5 @@ +use async_trait::async_trait; use bytes::Bytes; -use futures::Future; use http::StatusCode; use interledger_http::error::{ApiError, ApiErrorType, ProblemType}; use interledger_packet::Address; @@ -12,14 +12,14 @@ use std::str::FromStr; use url::Url; use uuid::Uuid; -// Account without an engine error +/// No Engine Configured for Account error type (404 Not Found) pub const NO_ENGINE_CONFIGURED_ERROR_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "No settlement engine configured", status: StatusCode::NOT_FOUND, }; -// Number conversion errors +/// Number Conversion error type (404 Not Found) pub const CONVERSION_ERROR_TYPE: ApiErrorType = ApiErrorType { r#type: &ProblemType::Default, title: "Conversion error", @@ -27,16 +27,25 @@ pub const CONVERSION_ERROR_TYPE: ApiErrorType = ApiErrorType { }; lazy_static! { + /// The Settlement ILP Address as defined in the [RFC](https://interledger.org/rfcs/0038-settlement-engines/) pub static ref SE_ILP_ADDRESS: Address = Address::from_str("peer.settle").unwrap(); } +/// The Quantity object as defined in the [RFC](https://interledger.org/rfcs/0038-settlement-engines/) +/// An amount denominated in some unit of a single, fungible asset. +/// (Since each account is denominated in a single asset, the type of asset is implied.) #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Quantity { + /// Amount of the unit, which is a non-negative integer. + /// This amount is encoded as a string to ensure no precision + /// is lost on platforms that don't natively support arbitrary precision integers. pub amount: String, + /// Asset scale of the unit pub scale: u8, } impl Quantity { + /// Creates a new Quantity object pub fn new(amount: impl ToString, scale: u8) -> Self { Quantity { amount: amount.to_string(), @@ -45,116 +54,146 @@ impl Quantity { } } +/// Helper enum allowing API responses to not specify any data and let the consumer +/// of the call decide what to do with the success value +// TODO: could this maybe be omitted and replaced with Option in Responses? #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] pub enum ApiResponse { + /// The API call succeeded without any returned data. Default, + /// The API call returned some data which should be consumed. Data(Bytes), } +/// Type alias for Result over [`ApiResponse`](./enum.ApiResponse.html), +/// and [`ApiError`](../../../interledger_http/error/struct.ApiError.html) +pub type ApiResult = Result; + /// Trait consumed by the Settlement Engine HTTP API. Every settlement engine /// MUST implement this trait, so that it can be then be exposed over the API. 
+#[async_trait] pub trait SettlementEngine { - fn create_account( - &self, - account_id: String, - ) -> Box + Send>; - - fn delete_account( - &self, - account_id: String, - ) -> Box + Send>; - - fn send_money( - &self, - account_id: String, - money: Quantity, - ) -> Box + Send>; - - fn receive_message( - &self, - account_id: String, - message: Vec, - ) -> Box + Send>; + /// Informs the settlement engine that a new account was created + /// within the accounting system using the given account identifier. + /// The settlement engine MAY perform tasks as a prerequisite to settle with the account. + /// For example, a settlement engine implementation might send messages to + /// the peer to exchange ledger identifiers or to negotiate settlement-related fees. + async fn create_account(&self, account_id: String) -> ApiResult; + + /// Instructs the settlement engine that an account was deleted. + async fn delete_account(&self, account_id: String) -> ApiResult; + + /// Asynchronously send an outgoing settlement. The accounting system sends this request and accounts for outgoing settlements. + async fn send_money(&self, account_id: String, money: Quantity) -> ApiResult; + + /// Process and respond to an incoming message from the peer's settlement engine. + /// The connector sends this request when it receives an incoming settlement message + /// from the peer, and returns the response message back to the peer. + async fn receive_message(&self, account_id: String, message: Vec) -> ApiResult; } // TODO: Since we still haven't finalized all the settlement details, we might // end up deciding to add some more values, e.g. some settlement engine uid or similar. // All instances of this struct should be replaced with Url instances once/if we // agree that there is no more info required to refer to an engine. +/// The details associated with a settlement engine pub struct SettlementEngineDetails { /// Base URL of the settlement engine pub url: Url, } +/// Extension trait for [Account](../interledger_service/trait.Account.html) with [settlement](https://interledger.org/rfcs/0038-settlement-engines/) related information pub trait SettlementAccount: Account { + /// The [SettlementEngineDetails](./struct.SettlementEngineDetails.html) (if any) associated with that account fn settlement_engine_details(&self) -> Option { None } } +#[async_trait] +/// Trait used by the connector to adjust account balances on settlement events pub trait SettlementStore { type Account: Account; - fn update_balance_for_incoming_settlement( + /// Increases the account's balance/prepaid amount by the provided amount + /// + /// This is optionally idempotent. If the same idempotency_key is provided + /// then no database operation must happen. If there is an idempotency + /// conflict (same idempotency key, different inputs to function) then + /// it should return an error + async fn update_balance_for_incoming_settlement( &self, account_id: Uuid, amount: u64, idempotency_key: Option, - ) -> Box + Send>; + ) -> Result<(), ()>; - fn refund_settlement( - &self, - account_id: Uuid, - settle_amount: u64, - ) -> Box + Send>; + /// Increases the account's balance by the provided amount. 
+ /// Only call this if a settlement request has failed + async fn refund_settlement(&self, account_id: Uuid, settle_amount: u64) -> Result<(), ()>; } +/// Trait used by the connector and engine to track amounts which should have been +/// settled but were not due to precision loss +#[async_trait] pub trait LeftoversStore { type AccountId: ToString; + /// The data type that the store uses for tracking numbers. type AssetType: ToString; /// Saves the leftover data - fn save_uncredited_settlement_amount( + /// + /// @dev: + /// If your store needs to support Big Integers but cannot, consider setting AssetType to String, + /// and then proceed to save a list of uncredited amounts as strings which would get loaded and summed + /// by the load_uncredited_settlement_amount and get_uncredited_settlement_amount + /// functions + async fn save_uncredited_settlement_amount( &self, // The account id that for which there was a precision loss account_id: Self::AccountId, // The amount for which precision loss occurred, along with their scale uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send>; + ) -> Result<(), ()>; /// Returns the leftover data scaled to `local_scale` from the saved scale. - /// If any precision loss occurs during the scaling, it should be saved as + /// If any precision loss occurs during the scaling, it is be saved as /// the new leftover value. - fn load_uncredited_settlement_amount( + /// + /// @dev: + /// If the store needs to support Big Integers but cannot, consider setting AssetType to String, + /// save a list of uncredited settlement amounts, and load them and sum them in this function + async fn load_uncredited_settlement_amount( &self, account_id: Self::AccountId, local_scale: u8, - ) -> Box + Send>; + ) -> Result; /// Clears any uncredited settlement amount associated with the account - fn clear_uncredited_settlement_amount( + async fn clear_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send>; + ) -> Result<(), ()>; - // Gets the current amount of leftovers in the store - fn get_uncredited_settlement_amount( + /// Gets the current amount of leftovers in the store + async fn get_uncredited_settlement_amount( &self, account_id: Self::AccountId, - ) -> Box + Send>; + ) -> Result<(Self::AssetType, u8), ()>; } +/// Helper struct for converting a quantity's amount from one asset scale to another #[derive(Debug)] pub struct ConvertDetails { pub from: u8, pub to: u8, } -/// Traits for u64 and f64 asset code conversions for amounts and rates +/// Helper trait for u64 and f64 asset code conversions for amounts and rates pub trait Convert { type Item: Sized; - // Returns the scaled result, or an error if there was an overflow + /// Returns the scaled result, or an error if there was an overflow fn normalize_scale(&self, details: ConvertDetails) -> Result; } @@ -234,12 +273,9 @@ mod tests { fn u64_test() { // overflows let huge_number = std::u64::MAX / 10; - assert_eq!( - huge_number - .normalize_scale(ConvertDetails { from: 1, to: 18 }) - .unwrap_err(), - (), - ); + assert!(huge_number + .normalize_scale(ConvertDetails { from: 1, to: 18 }) + .is_err(),); // 1 unit with scale 1, is 1 unit with scale 1 assert_eq!( 1u64.normalize_scale(ConvertDetails { from: 1, to: 1 }) @@ -310,15 +346,12 @@ mod tests { #[test] fn f64_test() { // overflow - assert_eq!( - std::f64::MAX - .normalize_scale(ConvertDetails { - from: 1, - to: std::u8::MAX, - }) - .unwrap_err(), - () - ); + assert!(std::f64::MAX + .normalize_scale(ConvertDetails { + from: 
1, + to: std::u8::MAX, + }) + .is_err(),); // 1 unit with base 1, is 1 unit with base 1 assert_eq!( diff --git a/crates/interledger-settlement/src/lib.rs b/crates/interledger-settlement/src/lib.rs index b8de39338..dc6a3f1b1 100644 --- a/crates/interledger-settlement/src/lib.rs +++ b/crates/interledger-settlement/src/lib.rs @@ -1,4 +1,6 @@ -// export the API only when explicitly asked #[cfg(feature = "settlement_api")] +/// Settlement API exposed by the Interledger Node +/// This is only available if the `settlement_api` feature is enabled pub mod api; +/// Core module including types, common store implementations for settlement pub mod core; diff --git a/crates/interledger-spsp/Cargo.toml b/crates/interledger-spsp/Cargo.toml index c810b46e3..688bf0c82 100644 --- a/crates/interledger-spsp/Cargo.toml +++ b/crates/interledger-spsp/Cargo.toml @@ -9,14 +9,18 @@ repository = "https://github.com/interledger-rs/interledger-rs" [dependencies] base64 = { version = "0.10.1", default-features = false } -bytes = { version = "0.4.12", default-features = false } +bytes = { version = "0.5", default-features = false } +bytes04 = { package = "bytes", version = "0.4.12", default-features = false } failure = { version = "0.1.5", default-features = false } -futures = { version = "0.1.29", default-features = false } -hyper = { version = "0.12.35", default-features = false } +futures = { version = "0.3.1", default-features = false } +hyper = { version = "0.13.1", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", features = ["serde"], default-features = false } interledger-service = { path = "../interledger-service", version = "^0.4.0", default-features = false } interledger-stream = { path = "../interledger-stream", version = "^0.4.0", default-features = false } log = { version = "0.4.8", default-features = false } -reqwest = { version = "0.9.22", default-features = false, features = ["default-tls"] } +reqwest = { version = "0.10", default-features = false, features = ["default-tls", "json"] } serde = { version = "1.0.101", default-features = false } serde_json = { version = "1.0.41", default-features = false } + +[dev-dependencies] +tokio = { version = "0.2.8", features = ["macros"] } diff --git a/crates/interledger-spsp/src/client.rs b/crates/interledger-spsp/src/client.rs index 0eaf396b3..ddb0e2cfe 100644 --- a/crates/interledger-spsp/src/client.rs +++ b/crates/interledger-spsp/src/client.rs @@ -1,66 +1,66 @@ use super::{Error, SpspResponse}; -use futures::{future::result, Future}; +use futures::TryFutureExt; use interledger_packet::Address; use interledger_service::{Account, IncomingService}; use interledger_stream::{send_money, StreamDelivery}; use log::{debug, error, trace}; -use reqwest::r#async::Client; +use reqwest::Client; use std::convert::TryFrom; -pub fn query(server: &str) -> impl Future { +/// Get an ILP Address and shared secret by the receiver of this payment for this connection +pub async fn query(server: &str) -> Result { let server = payment_pointer_to_url(server); trace!("Querying receiver: {}", server); let client = Client::new(); - client + let res = client .get(&server) .header("Accept", "application/spsp4+json") .send() .map_err(|err| Error::HttpError(format!("Error querying SPSP receiver: {:?}", err))) - .and_then(|res| { - res.error_for_status() - .map_err(|err| Error::HttpError(format!("Error querying SPSP receiver: {:?}", err))) - }) - .and_then(|mut res| { - res.json::() - .map_err(|err| 
Error::InvalidSpspServerResponseError(format!("{:?}", err))) - }) + .await?; + + let res = res + .error_for_status() + .map_err(|err| Error::HttpError(format!("Error querying SPSP receiver: {:?}", err)))?; + + res.json::() + .map_err(|err| Error::InvalidSpspServerResponseError(format!("{:?}", err))) + .await } /// Query the details of the given Payment Pointer and send a payment using the STREAM protocol. /// /// This returns the amount delivered, as reported by the receiver and in the receiver's asset's units. -pub fn pay( +pub async fn pay( service: S, from_account: A, receiver: &str, source_amount: u64, -) -> impl Future +) -> Result where - S: IncomingService + Clone, - A: Account, + S: IncomingService + Send + Sync + Clone + 'static, + A: Account + Send + Sync + Clone + 'static, { - query(receiver).and_then(move |spsp| { - let shared_secret = spsp.shared_secret; - let dest = spsp.destination_account; - result(Address::try_from(dest).map_err(move |err| { - error!("Error parsing address"); - Error::InvalidSpspServerResponseError(err.to_string()) - })) - .and_then(move |addr| { - debug!("Sending SPSP payment to address: {}", addr); + let spsp = query(receiver).await?; + let shared_secret = spsp.shared_secret; + let dest = spsp.destination_account; + let addr = Address::try_from(dest).map_err(move |err| { + error!("Error parsing address"); + Error::InvalidSpspServerResponseError(err.to_string()) + })?; + debug!("Sending SPSP payment to address: {}", addr); + + let (receipt, _plugin) = + send_money(service, &from_account, addr, &shared_secret, source_amount) + .map_err(move |err| { + error!("Error sending payment: {:?}", err); + Error::SendMoneyError(source_amount) + }) + .await?; - send_money(service, &from_account, addr, &shared_secret, source_amount) - .map(move |(receipt, _plugin)| { - debug!("Sent SPSP payment. StreamDelivery: {:?}", receipt); - receipt - }) - .map_err(move |err| { - error!("Error sending payment: {:?}", err); - Error::SendMoneyError(source_amount) - }) - }) - }) + debug!("Sent SPSP payment. 
StreamDelivery: {:?}", receipt); + Ok(receipt) } fn payment_pointer_to_url(payment_pointer: &str) -> String { diff --git a/crates/interledger-spsp/src/lib.rs b/crates/interledger-spsp/src/lib.rs index fe2b4ac9a..1a2c0e910 100644 --- a/crates/interledger-spsp/src/lib.rs +++ b/crates/interledger-spsp/src/lib.rs @@ -10,7 +10,9 @@ use interledger_packet::Address; use interledger_stream::Error as StreamError; use serde::{Deserialize, Serialize}; +/// An SPSP client which can query an SPSP Server's payment pointer and initiate a STREAM payment mod client; +/// An SPSP Server implementing an HTTP Service which generates ILP Addresses and Shared Secrets mod server; pub use client::{pay, query}; @@ -33,14 +35,19 @@ pub enum Error { InvalidPaymentPointerError(String), } +/// An SPSP Response returned by the SPSP server #[derive(Debug, Deserialize, Serialize)] pub struct SpspResponse { + /// The generated ILP Address for this SPSP connection destination_account: Address, + /// Base-64 encoded shared secret between SPSP client and server + /// to be consumed for the STREAM connection #[serde(with = "serde_base64")] shared_secret: Vec, } // From https://github.com/serde-rs/json/issues/360#issuecomment-330095360 +#[doc(hidden)] mod serde_base64 { use base64; use serde::{de, Deserialize, Deserializer, Serializer}; diff --git a/crates/interledger-spsp/src/server.rs b/crates/interledger-spsp/src/server.rs index 65541ceae..2d2ce38dc 100644 --- a/crates/interledger-spsp/src/server.rs +++ b/crates/interledger-spsp/src/server.rs @@ -1,12 +1,14 @@ use super::SpspResponse; use bytes::Bytes; -use futures::future::{ok, FutureResult, IntoFuture}; use hyper::{service::Service as HttpService, Body, Error, Request, Response}; use interledger_packet::Address; use interledger_stream::ConnectionGenerator; use log::debug; use std::error::Error as StdError; -use std::{fmt, str}; +use std::{ + fmt, str, + task::{Context, Poll}, +}; /// A Hyper::Service that responds to incoming SPSP Query requests with newly generated /// details for a STREAM connection. 
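Since query and pay are now plain async functions, callers simply drive them with .await. A minimal illustrative call site (the service, account, payment pointer, and amount below are placeholders, not part of this patch):

use interledger_service::{Account, IncomingService};
use interledger_spsp::{pay, Error};

// Sketch only: `service` is whatever IncomingService the node already has and
// `account` is the sending account; both are assumed to satisfy the new
// Send + Sync + Clone + 'static bounds introduced above.
async fn send_example_payment<S, A>(service: S, account: A) -> Result<(), Error>
where
    S: IncomingService<A> + Send + Sync + Clone + 'static,
    A: Account + Send + Sync + Clone + 'static,
{
    // Queries the payment pointer, then streams the payment and returns the
    // receiver-reported StreamDelivery receipt.
    let receipt = pay(service, account, "$example.com/alice", 1000).await?;
    log::debug!("Payment receipt: {:?}", receipt);
    Ok(())
}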
@@ -17,14 +19,19 @@ pub struct SpspResponder { } impl SpspResponder { + /// Constructs a new SPSP Responder by receiving an ILP Address and a server **secret** pub fn new(ilp_address: Address, server_secret: Bytes) -> Self { - let connection_generator = ConnectionGenerator::new(server_secret); + let server_secret_compat = bytes04::Bytes::from(server_secret.as_ref()); + let connection_generator = ConnectionGenerator::new(server_secret_compat); SpspResponder { ilp_address, connection_generator, } } + /// Returns an HTTP Response containing the destination account + /// and shared secret for this connection + /// These fields are generated via [Stream's `ConnectionGenerator`](../interledger_stream/struct.ConnectionGenerator.html#method.generate_address_and_secret) pub fn generate_http_response(&self) -> Response { let (destination_account, shared_secret) = self .connection_generator @@ -47,24 +54,17 @@ impl SpspResponder { } } -impl HttpService for SpspResponder { - type ReqBody = Body; - type ResBody = Body; +impl HttpService> for SpspResponder { + type Response = Response; type Error = Error; - type Future = FutureResult, Error>; + type Future = futures::future::Ready>; - fn call(&mut self, _request: Request) -> Self::Future { - ok(self.generate_http_response()) + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Ok(()).into() } -} - -impl IntoFuture for SpspResponder { - type Item = Self; - type Error = Never; - type Future = FutureResult; - fn into_future(self) -> Self::Future { - ok(self) + fn call(&mut self, _request: Request) -> Self::Future { + futures::future::ok(self.generate_http_response()) } } @@ -87,11 +87,10 @@ impl StdError for Never { #[cfg(test)] mod spsp_server_test { use super::*; - use futures::Future; use std::str::FromStr; - #[test] - fn spsp_response_headers() { + #[tokio::test] + async fn spsp_response_headers() { let addr = Address::from_str("example.receiver").unwrap(); let mut responder = SpspResponder::new(addr, Bytes::from(&[0; 32][..])); let response = responder @@ -103,7 +102,7 @@ mod spsp_server_test { .body(Body::empty()) .unwrap(), ) - .wait() + .await .unwrap(); assert_eq!( response.headers().get("Content-Type").unwrap(), diff --git a/crates/interledger-store/Cargo.toml b/crates/interledger-store/Cargo.toml index 083f1d0ff..6d26efbdb 100644 --- a/crates/interledger-store/Cargo.toml +++ b/crates/interledger-store/Cargo.toml @@ -21,8 +21,8 @@ path = "tests/redis/redis_tests.rs" required-features = ["redis"] [dependencies] -bytes = { version = "0.4.12", default-features = false } -futures = { version = "0.1.29", default-features = false } +bytes = { version = "0.5", default-features = false } +futures = { version = "0.3", default-features = false } interledger-api = { path = "../interledger-api", version = "^0.3.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", default-features = false } interledger-btp = { path = "../interledger-btp", version = "^0.4.0", default-features = false } @@ -39,21 +39,20 @@ parking_lot = { version = "0.9.0", default-features = false } ring = { version = "0.16.9", default-features = false } serde = { version = "1.0.101", default-features = false, features = ["derive"] } serde_json = { version = "1.0.41", default-features = false } -tokio-executor = { version = "0.1.8", default-features = false } -tokio-timer = { version = "0.2.11", default-features = false } +tokio = { version = "0.2.6", default-features = false, features = ["macros", "rt-core"] } url = { version = 
"2.1.0", default-features = false, features = ["serde"] } -http = { version = "0.1.18", default-features = false } -secrecy = { version = "0.5.1", default-features = false, features = ["serde", "bytes"] } -zeroize = { version = "1.0.0", default-features = false, features = ["bytes"] } +http = { version = "0.2", default-features = false } +secrecy = { version = "0.6", features = ["serde", "bytes"] } +zeroize = { version = "1.0.0", default-features = false } num-bigint = { version = "0.2.3", default-features = false, features = ["std"]} uuid = { version = "0.8.1", default-features = false, features = ["serde"] } # redis feature -redis_crate = { package = "redis", version = "0.13.0", default-features = false, features = ["executor"], optional = true } +redis_crate = { package = "redis", version = "0.15.1", default-features = false, features = ["tokio-rt-core"], optional = true } +async-trait = "0.1.22" [dev-dependencies] env_logger = { version = "0.7.0", default-features = false } net2 = { version = "0.2.33", default-features = false } rand = { version = "0.7.2", default-features = false } -tokio = { version = "0.1.22", default-features = false } os_type = { version = "2.2", default-features = false } diff --git a/crates/interledger-store/src/account.rs b/crates/interledger-store/src/account.rs index 6cca6652b..03ca854f7 100644 --- a/crates/interledger-store/src/account.rs +++ b/crates/interledger-store/src/account.rs @@ -11,40 +11,76 @@ use interledger_service_util::{ use interledger_settlement::core::types::{SettlementAccount, SettlementEngineDetails}; use log::error; use ring::aead; -use secrecy::{ExposeSecret, SecretBytes, SecretString}; +use secrecy::{ExposeSecret, SecretBytesMut, SecretString}; use serde::Serializer; use serde::{Deserialize, Serialize}; use std::str::{self, FromStr}; use url::Url; use uuid::Uuid; +/// The account which contains all the data required for a full implementation of Interledger +// TODO: Maybe we should feature gate these fields? e.g. ilp_over_btp variables should only be there +// if btp feature is enabled #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Account { + /// Unique id corresponding to the account's primary key in the database pub(crate) id: Uuid, + /// The account's username pub(crate) username: Username, #[serde(serialize_with = "address_to_string")] + /// The account's Interledger Protocol address pub(crate) ilp_address: Address, // TODO add additional routes + /// The account's currency pub(crate) asset_code: String, + /// The account's asset scale pub(crate) asset_scale: u8, + /// The max amount per packet which can be routed for this account pub(crate) max_packet_amount: u64, + /// The minimum balance this account can have (consider this as a credit/trust limit) pub(crate) min_balance: Option, + /// The account's ILP over HTTP URL (this is where packets are sent over HTTP from your node) pub(crate) ilp_over_http_url: Option, #[serde(serialize_with = "optional_secret_bytes_to_utf8")] - pub(crate) ilp_over_http_incoming_token: Option, + /// The account's API and incoming ILP over HTTP token. + /// This must match the ILP over HTTP outgoing token on the peer's node if receiving + /// packets from that peer + // TODO: The incoming token is used for both ILP over HTTP, and for authorizing actions from the HTTP API. + // Should we add 1 more token, for more granular permissioning? 
+ pub(crate) ilp_over_http_incoming_token: Option, #[serde(serialize_with = "optional_secret_bytes_to_utf8")] - pub(crate) ilp_over_http_outgoing_token: Option, + /// The account's outgoing ILP over HTTP token + /// This must match the ILP over HTTP incoming token on the peer's node if sending + /// packets to that peer + pub(crate) ilp_over_http_outgoing_token: Option, + /// The account's ILP over BTP URL (this is where packets are sent over WebSockets from your node) pub(crate) ilp_over_btp_url: Option, #[serde(serialize_with = "optional_secret_bytes_to_utf8")] - pub(crate) ilp_over_btp_incoming_token: Option, + /// The account's incoming ILP over BTP token. + /// This must match the ILP over BTP outgoing token on the peer's node if exchanging + /// packets with that peer + pub(crate) ilp_over_btp_incoming_token: Option, #[serde(serialize_with = "optional_secret_bytes_to_utf8")] - pub(crate) ilp_over_btp_outgoing_token: Option, + /// The account's outgoing ILP over BTP token. + /// This must match the ILP over BTP incoming token on the peer's node if exchanging + /// packets with that peer + pub(crate) ilp_over_btp_outgoing_token: Option, + /// The threshold after which the balance service will trigger a settlement pub(crate) settle_threshold: Option, + /// The amount which the balance service will attempt to settle down to pub(crate) settle_to: Option, + /// The routing relation of the account pub(crate) routing_relation: RoutingRelation, + /// The round trip time of the account (should be set depending on how + /// well the network connectivity of the account and the node is) pub(crate) round_trip_time: u32, + /// The limit of packets the account can send per minute pub(crate) packets_per_minute_limit: Option, + /// The maximum amount the account can send per minute pub(crate) amount_per_minute_limit: Option, + /// The account's settlement engine URL. If a global engine url is configured + /// for the account's asset code, that will be used instead (even if the account is + /// configured with a specific one) pub(crate) settlement_engine_url: Option, } @@ -56,7 +92,7 @@ where } fn optional_secret_bytes_to_utf8( - _bytes: &Option, + _bytes: &Option, serializer: S, ) -> Result where @@ -66,6 +102,10 @@ where } impl Account { + /// Creates an account from the provided id and details. If there is no ILP Address + /// in the provided details, then the account's ILP Address is generated by appending + /// the `details.username` to the provided `node_ilp_address`. 
+ /// The default RoutingRelation is `NonRoutingAccount` pub fn try_from( id: Uuid, details: AccountDetails, @@ -118,17 +158,17 @@ impl Account { ilp_over_http_url, ilp_over_http_incoming_token: details .ilp_over_http_incoming_token - .map(|token| SecretBytes::new(token.expose_secret().to_string())), + .map(|token| SecretBytesMut::new(token.expose_secret().as_str())), ilp_over_http_outgoing_token: details .ilp_over_http_outgoing_token - .map(|token| SecretBytes::new(token.expose_secret().to_string())), + .map(|token| SecretBytesMut::new(token.expose_secret().as_str())), ilp_over_btp_url, ilp_over_btp_incoming_token: details .ilp_over_btp_incoming_token - .map(|token| SecretBytes::new(token.expose_secret().to_string())), + .map(|token| SecretBytesMut::new(token.expose_secret().as_str())), ilp_over_btp_outgoing_token: details .ilp_over_btp_outgoing_token - .map(|token| SecretBytes::new(token.expose_secret().to_string())), + .map(|token| SecretBytesMut::new(token.expose_secret().as_str())), settle_to: details.settle_to, settle_threshold: details.settle_threshold, routing_relation, @@ -139,30 +179,31 @@ impl Account { }) } + /// Encrypts the account's incoming/outgoing BTP and HTTP keys with the provided encryption key pub fn encrypt_tokens( mut self, encryption_key: &aead::LessSafeKey, ) -> AccountWithEncryptedTokens { if let Some(ref token) = self.ilp_over_btp_outgoing_token { - self.ilp_over_btp_outgoing_token = Some(SecretBytes::from(encrypt_token( + self.ilp_over_btp_outgoing_token = Some(SecretBytesMut::from(encrypt_token( encryption_key, &token.expose_secret(), ))); } if let Some(ref token) = self.ilp_over_http_outgoing_token { - self.ilp_over_http_outgoing_token = Some(SecretBytes::from(encrypt_token( + self.ilp_over_http_outgoing_token = Some(SecretBytesMut::from(encrypt_token( encryption_key, &token.expose_secret(), ))); } if let Some(ref token) = self.ilp_over_btp_incoming_token { - self.ilp_over_btp_incoming_token = Some(SecretBytes::from(encrypt_token( + self.ilp_over_btp_incoming_token = Some(SecretBytesMut::from(encrypt_token( encryption_key, &token.expose_secret(), ))); } if let Some(ref token) = self.ilp_over_http_incoming_token { - self.ilp_over_http_incoming_token = Some(SecretBytes::from(encrypt_token( + self.ilp_over_http_incoming_token = Some(SecretBytesMut::from(encrypt_token( encryption_key, &token.expose_secret(), ))); @@ -171,12 +212,14 @@ impl Account { } } +/// A wrapper over the [`Account`](./struct.Account.html) which contains their encrypt tokens. 
#[derive(Debug, Clone)] pub struct AccountWithEncryptedTokens { pub(super) account: Account, } impl AccountWithEncryptedTokens { + /// Decrypts the account's incoming/outgoing BTP and HTTP keys with the provided decryption key pub fn decrypt_tokens(mut self, decryption_key: &aead::LessSafeKey) -> Account { if let Some(ref encrypted) = self.account.ilp_over_btp_outgoing_token { self.account.ilp_over_btp_outgoing_token = @@ -227,6 +270,8 @@ impl AccountWithEncryptedTokens { } } +// The following trait implementations are simple accessors to the Account's fields + impl AccountTrait for Account { fn id(&self) -> Uuid { self.id diff --git a/crates/interledger-store/src/crypto.rs b/crates/interledger-store/src/crypto.rs index 00adb05a6..30d70bcc2 100644 --- a/crates/interledger-store/src/crypto.rs +++ b/crates/interledger-store/src/crypto.rs @@ -1,4 +1,4 @@ -use bytes::Bytes; +use bytes::BytesMut; use ring::{ aead, hmac, rand::{SecureRandom, SystemRandom}, @@ -8,7 +8,7 @@ const NONCE_LENGTH: usize = 12; static ENCRYPTION_KEY_GENERATION_STRING: &[u8] = b"ilp_store_redis_encryption_key"; use core::sync::atomic; -use secrecy::{DebugSecret, Secret, SecretBytes}; +use secrecy::{DebugSecret, Secret, SecretBytesMut}; use std::ptr; use zeroize::Zeroize; @@ -117,7 +117,7 @@ pub fn generate_keys(server_secret: &[u8]) -> (Secret, Secret Bytes { +pub fn encrypt_token(encryption_key: &aead::LessSafeKey, token: &[u8]) -> BytesMut { let mut token = token.to_vec(); let mut nonce: [u8; NONCE_LENGTH] = [0; NONCE_LENGTH]; @@ -129,7 +129,7 @@ pub fn encrypt_token(encryption_key: &aead::LessSafeKey, token: &[u8]) -> Bytes match encryption_key.seal_in_place_append_tag(nonce, aead::Aad::from(&[]), &mut token) { Ok(_) => { token.append(&mut nonce_copy.as_ref().to_vec()); - Bytes::from(token) + BytesMut::from(token.as_slice()) } _ => panic!("Unable to encrypt token"), } @@ -138,7 +138,7 @@ pub fn encrypt_token(encryption_key: &aead::LessSafeKey, token: &[u8]) -> Bytes pub fn decrypt_token( decryption_key: &aead::LessSafeKey, encrypted: &[u8], -) -> Result { +) -> Result { if encrypted.len() < aead::MAX_TAG_LEN { return Err(()); } @@ -150,7 +150,7 @@ pub fn decrypt_token( let nonce = aead::Nonce::assume_unique_for_key(nonce); if let Ok(token) = decryption_key.open_in_place(nonce, aead::Aad::empty(), &mut encrypted) { - Ok(SecretBytes::new(token.to_vec())) + Ok(SecretBytesMut::new(&token[..])) } else { Err(()) } diff --git a/crates/interledger-store/src/lib.rs b/crates/interledger-store/src/lib.rs index 3b67b70fc..2f7412d84 100644 --- a/crates/interledger-store/src/lib.rs +++ b/crates/interledger-store/src/lib.rs @@ -2,7 +2,10 @@ //! //! Backend databases for storing account details, balances, the routing table, etc. +/// A module to define the primitive `Account` struct which implements `Account` related traits. 
pub mod account; +/// Cryptographic utilities for encrypting/decrypting data as well as clearing data from memory pub mod crypto; +/// A redis backend using [redis-rs](https://github.com/mitsuhiko/redis-rs/) #[cfg(feature = "redis")] pub mod redis; diff --git a/crates/interledger-store/src/redis/mod.rs b/crates/interledger-store/src/redis/mod.rs index c8290dc9c..31d373409 100644 --- a/crates/interledger-store/src/redis/mod.rs +++ b/crates/interledger-store/src/redis/mod.rs @@ -19,12 +19,9 @@ use reconnect::RedisReconnect; use super::account::{Account, AccountWithEncryptedTokens}; use super::crypto::{encrypt_token, generate_keys, DecryptionKey, EncryptionKey}; -use bytes::Bytes; -use futures::{ - future::{err, ok, result, Either}, - sync::mpsc::UnboundedSender, - Future, Stream, -}; +use async_trait::async_trait; +use bytes::{Bytes, BytesMut}; +use futures::channel::mpsc::UnboundedSender; use http::StatusCode; use interledger_api::{AccountDetails, AccountSettings, EncryptedAccountSettings, NodeStore}; use interledger_btp::BtpStore; @@ -46,11 +43,12 @@ use lazy_static::lazy_static; use log::{debug, error, trace, warn}; use num_bigint::BigUint; use parking_lot::RwLock; +use redis_crate::AsyncCommands; use redis_crate::{ self, cmd, from_redis_value, Client, ConnectionInfo, ControlFlow, ErrorKind, FromRedisValue, - PipelineCommands, PubSubCommands, RedisError, RedisWrite, Script, ToRedisArgs, Value, + PubSubCommands, RedisError, RedisWrite, Script, ToRedisArgs, Value, }; -use secrecy::{ExposeSecret, Secret, SecretBytes}; +use secrecy::{ExposeSecret, Secret, SecretBytesMut}; use serde::{Deserialize, Serialize}; use serde_json; use std::{ @@ -62,10 +60,8 @@ use std::{ str, str::FromStr, sync::Arc, - time::{Duration, Instant}, + time::Duration, }; -use tokio_executor::spawn; -use tokio_timer::Interval; use url::Url; use uuid::Uuid; use zeroize::Zeroize; @@ -80,14 +76,17 @@ static DEFAULT_ROUTE_KEY: &str = "routes:default"; static STREAM_NOTIFICATIONS_PREFIX: &str = "stream_notifications:"; static SETTLEMENT_ENGINES_KEY: &str = "settlement_engines"; +/// Domain separator for leftover amounts fn uncredited_amount_key(account_id: impl ToString) -> String { format!("uncredited-amount:{}", account_id.to_string()) } +/// Domain separator for idempotency keys fn prefixed_idempotency_key(idempotency_key: String) -> String { format!("idempotency-key:{}", idempotency_key) } +/// Domain separator for accounts fn accounts_key(account_id: Uuid) -> String { format!("accounts:{}", account_id) } @@ -98,37 +97,46 @@ fn accounts_key(account_id: Uuid) -> String { // process is accessing Redis at the same time. // For more information on scripting in Redis, see https://redis.io/commands/eval lazy_static! { + /// The node's default ILP Address static ref DEFAULT_ILP_ADDRESS: Address = Address::from_str("local.host").unwrap(); /// This lua script fetches an account associated with a username. The client /// MUST ensure that the returned account is authenticated. 
static ref ACCOUNT_FROM_USERNAME: Script = Script::new(include_str!("lua/account_from_username.lua")); - /// Load a list of accounts + /// Lua script which loads a list of accounts /// If an account does not have a settlement_engine_url set /// but there is one configured for that account's currency, /// it will use the globally configured url static ref LOAD_ACCOUNTS: Script = Script::new(include_str!("lua/load_accounts.lua")); + /// Lua script which reduces the provided account's balance before sending a Prepare packet static ref PROCESS_PREPARE: Script = Script::new(include_str!("lua/process_prepare.lua")); + /// Lua script which increases the provided account's balance after receiving a Fulfill packet static ref PROCESS_FULFILL: Script = Script::new(include_str!("lua/process_fulfill.lua")); + /// Lua script which increases the provided account's balance after receiving a Reject packet static ref PROCESS_REJECT: Script = Script::new(include_str!("lua/process_reject.lua")); + /// Lua script which increases the provided account's balance after a settlement attempt failed static ref REFUND_SETTLEMENT: Script = Script::new(include_str!("lua/refund_settlement.lua")); + /// Lua script which increases the provided account's balance after an incoming settlement succeeded static ref PROCESS_INCOMING_SETTLEMENT: Script = Script::new(include_str!("lua/process_incoming_settlement.lua")); } +/// Builder for the Redis Store pub struct RedisStoreBuilder { redis_url: ConnectionInfo, secret: [u8; 32], poll_interval: u64, + /// Connector's ILP Address. Used to insert `Child` accounts as node_ilp_address: Address, } impl RedisStoreBuilder { + /// Simple Constructor pub fn new(redis_url: ConnectionInfo, secret: [u8; 32]) -> Self { RedisStoreBuilder { redis_url, @@ -138,136 +146,142 @@ impl RedisStoreBuilder { } } + /// Sets the ILP Address corresponding to the node pub fn node_ilp_address(&mut self, node_ilp_address: Address) -> &mut Self { self.node_ilp_address = node_ilp_address; self } + /// Sets the poll interval at which the store will update its routes pub fn poll_interval(&mut self, poll_interval: u64) -> &mut Self { self.poll_interval = poll_interval; self } - pub fn connect(&mut self) -> impl Future { + /// Connects to the Redis Store + /// + /// Specifically + /// 1. Generates encryption and decryption keys + /// 1. Connects to the redis store (ensuring that it reconnects in case of drop) + /// 1. Gets the Node address assigned to us by our parent (if it exists) + /// 1. Starts polling for routing table updates + /// 1. 
Spawns a thread to notify incoming payments over WebSockets + pub async fn connect(&mut self) -> Result { let redis_info = self.redis_url.clone(); let (encryption_key, decryption_key) = generate_keys(&self.secret[..]); self.secret.zeroize(); // clear the secret after it has been used for key generation let poll_interval = self.poll_interval; let ilp_address = self.node_ilp_address.clone(); - RedisReconnect::connect(redis_info.clone()) + let client = Client::open(redis_info.clone()) + .map_err(|err| error!("Error creating subscription Redis client: {:?}", err))?; + debug!("Connected subscription client to redis: {:?}", client); + let mut connection = RedisReconnect::connect(redis_info.clone()) .map_err(|_| ()) - .join( - result(Client::open(redis_info.clone())) - .map_err(|err| error!("Error creating subscription Redis client: {:?}", err)) - .and_then(|client| { - debug!("Connected subscription client to redis: {:?}", client); - client.get_connection().map_err(|err| { - error!("Error connecting subscription client to Redis: {:?}", err) - }) - }), - ) - .and_then(move |(connection, mut sub_connection)| { - // Before initializing the store, check if we have an address - // that was configured due to adding a parent. If no parent was - // found, use the builder's provided address (local.host) or the - // one we decided to override it with - redis_crate::cmd("GET") - .arg(PARENT_ILP_KEY) - .query_async(connection.clone()) - .map_err(|err| { - error!( - "Error checking whether we have a parent configured: {:?}", - err - ) - }) - .and_then(move |(_, address): (RedisReconnect, Option)| { - Ok(if let Some(address) = address { - Address::from_str(&address).unwrap() - } else { - ilp_address - }) - }) - .and_then(move |node_ilp_address| { - let store = RedisStore { - ilp_address: Arc::new(RwLock::new(node_ilp_address)), - connection, - subscriptions: Arc::new(RwLock::new(HashMap::new())), - exchange_rates: Arc::new(RwLock::new(HashMap::new())), - routes: Arc::new(RwLock::new(Arc::new(HashMap::new()))), - encryption_key: Arc::new(encryption_key), - decryption_key: Arc::new(decryption_key), - }; - - // Poll for routing table updates - // Note: if this behavior changes, make sure to update the Drop implementation - let connection_clone = Arc::downgrade(&store.connection.conn); - let redis_info = store.connection.redis_info.clone(); - let routing_table = store.routes.clone(); - let poll_routes = - Interval::new(Instant::now(), Duration::from_millis(poll_interval)) - .map_err(|err| error!("Interval error: {:?}", err)) - .for_each(move |_| { - if let Some(conn) = connection_clone.upgrade() { - Either::A(update_routes( - RedisReconnect { - conn, - redis_info: redis_info.clone(), - }, - routing_table.clone(), - )) - } else { - debug!("Not polling routes anymore because connection was closed"); - // TODO make sure the interval stops - Either::B(err(())) - } - }); - spawn(poll_routes); - - // Here we spawn a worker thread to listen for incoming messages on Redis pub/sub, - // running a callback for each message received. - // This currently must be a thread rather than a task due to the redis-rs driver - // not yet supporting asynchronous subscriptions (see https://github.com/mitsuhiko/redis-rs/issues/183). 
- let subscriptions_clone = store.subscriptions.clone(); - std::thread::spawn(move || { - let sub_status = - sub_connection.psubscribe::<_, _, Vec>(&["*"], move |msg| { - let channel_name = msg.get_channel_name(); - if channel_name.starts_with(STREAM_NOTIFICATIONS_PREFIX) { - if let Ok(account_id) = Uuid::from_str(&channel_name[STREAM_NOTIFICATIONS_PREFIX.len()..]) { - let message: PaymentNotification = match serde_json::from_slice(msg.get_payload_bytes()) { - Ok(s) => s, - Err(e) => { - error!("Failed to get payload from subscription: {}", e); - return ControlFlow::Continue; - } - }; - trace!("Subscribed message received for account {}: {:?}", account_id, message); - match subscriptions_clone.read().get(&account_id) { - Some(sender) => { - if let Err(err) = sender.unbounded_send(message) { - error!("Failed to send message: {}", err); - } - } - None => trace!("Ignoring message for account {} because there were no open subscriptions", account_id), - } - } else { - error!("Invalid Uuid in channel name: {}", channel_name); - } - } else { - warn!("Ignoring unexpected message from Redis subscription for channel: {}", channel_name); + .await?; + let mut sub_connection = client + .get_connection() + .map_err(|err| error!("Error connecting subscription client to Redis: {:?}", err))?; + // Before initializing the store, check if we have an address + // that was configured due to adding a parent. If no parent was + // found, use the builder's provided address (local.host) or the + // one we decided to override it with + let address: Option = connection + .get(PARENT_ILP_KEY) + .map_err(|err| { + error!( + "Error checking whether we have a parent configured: {:?}", + err + ) + }) + .await?; + let node_ilp_address = if let Some(address) = address { + Address::from_str(&address).unwrap() + } else { + ilp_address + }; + + let store = RedisStore { + ilp_address: Arc::new(RwLock::new(node_ilp_address)), + connection, + subscriptions: Arc::new(RwLock::new(HashMap::new())), + exchange_rates: Arc::new(RwLock::new(HashMap::new())), + routes: Arc::new(RwLock::new(Arc::new(HashMap::new()))), + encryption_key: Arc::new(encryption_key), + decryption_key: Arc::new(decryption_key), + }; + + // Poll for routing table updates + // Note: if this behavior changes, make sure to update the Drop implementation + let connection_clone = Arc::downgrade(&store.connection.conn); + let redis_info = store.connection.redis_info.clone(); + let routing_table = store.routes.clone(); + + let poll_routes = async move { + let mut interval = tokio::time::interval(Duration::from_millis(poll_interval)); + // Irrefutable while pattern, can we do something here? + loop { + interval.tick().await; + if let Some(conn) = connection_clone.upgrade() { + let _ = update_routes( + RedisReconnect { + conn, + redis_info: redis_info.clone(), + }, + routing_table.clone(), + ) + .await; + } else { + debug!("Not polling routes anymore because connection was closed"); + break; + } + } + Ok::<(), ()>(()) + }; + tokio::spawn(poll_routes); + + // Here we spawn a worker thread to listen for incoming messages on Redis pub/sub, + // running a callback for each message received. + // This currently must be a thread rather than a task due to the redis-rs driver + // not yet supporting asynchronous subscriptions (see https://github.com/mitsuhiko/redis-rs/issues/183). 
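
A minimal sketch of that thread-to-async bridge, using a futures unbounded channel in place of the real subscriptions map; the spawned thread here just fabricates a few strings where the real code runs the blocking redis-rs psubscribe callback (assumes the futures 0.3 and tokio 0.2 crates):

    use futures::channel::mpsc::unbounded;
    use futures::stream::StreamExt;
    use std::{thread, time::Duration};

    #[tokio::main]
    async fn main() {
        // Async consumers own the receiving half of an unbounded channel.
        let (tx, mut rx) = unbounded::<String>();

        // A plain OS thread plays the role of the blocking psubscribe loop:
        // it produces notifications and forwards them with unbounded_send.
        thread::spawn(move || {
            for i in 0..3 {
                thread::sleep(Duration::from_millis(50));
                if tx.unbounded_send(format!("payment notification {}", i)).is_err() {
                    // Every receiver was dropped; stop the subscription loop.
                    break;
                }
            }
        });

        // The async side consumes the notifications without ever blocking.
        while let Some(msg) = rx.next().await {
            println!("received: {}", msg);
        }
    }
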
+ let subscriptions_clone = store.subscriptions.clone(); + std::thread::spawn(move || { + let sub_status = + sub_connection.psubscribe::<_, _, Vec>(&["*"], move |msg| { + let channel_name = msg.get_channel_name(); + if channel_name.starts_with(STREAM_NOTIFICATIONS_PREFIX) { + if let Ok(account_id) = Uuid::from_str(&channel_name[STREAM_NOTIFICATIONS_PREFIX.len()..]) { + let message: PaymentNotification = match serde_json::from_slice(msg.get_payload_bytes()) { + Ok(s) => s, + Err(e) => { + error!("Failed to get payload from subscription: {}", e); + return ControlFlow::Continue; + } + }; + trace!("Subscribed message received for account {}: {:?}", account_id, message); + match subscriptions_clone.read().get(&account_id) { + Some(sender) => { + if let Err(err) = sender.unbounded_send(message) { + error!("Failed to send message: {}", err); } - ControlFlow::Continue - }); - match sub_status { - Err(e) => warn!("Could not issue psubscribe to Redis: {}", e), - Ok(_) => debug!("Successfully subscribed to Redis pubsub"), + } + None => trace!("Ignoring message for account {} because there were no open subscriptions", account_id), } - }); + } else { + error!("Invalid Uuid in channel name: {}", channel_name); + } + } else { + warn!("Ignoring unexpected message from Redis subscription for channel: {}", channel_name); + } + ControlFlow::Continue + }); + match sub_status { + Err(e) => warn!("Could not issue psubscribe to Redis: {}", e), + Ok(_) => debug!("Successfully subscribed to Redis pubsub"), + } + }); - Ok(store) - }) - }) + Ok(store) } } @@ -279,8 +293,11 @@ impl RedisStoreBuilder { /// future versions of it will use PubSub to subscribe to updates. #[derive(Clone)] pub struct RedisStore { + /// The Store's ILP Address ilp_address: Arc>, + /// A connection which reconnects if dropped by accident connection: RedisReconnect, + /// WebSocket sender which publishes incoming payment updates subscriptions: Arc>>>, exchange_rates: Arc>>, /// The store keeps the routing table in memory so that it can be returned @@ -290,29 +307,32 @@ pub struct RedisStore { /// The inner `Arc` is used so that the `routing_table` method can /// return a reference to the routing table without cloning the underlying data. routes: Arc>>>, + /// Encryption Key so that the no cleartext data are stored encryption_key: Arc>, + /// Decryption Key to provide cleartext data to users decryption_key: Arc>, } impl RedisStore { - fn get_all_accounts_ids(&self) -> impl Future, Error = ()> { - let mut pipe = redis_crate::pipe(); - pipe.smembers("accounts"); - pipe.query_async(self.connection.clone()) + /// Gets all the account ids from Redis + async fn get_all_accounts_ids(&self) -> Result, ()> { + let mut connection = self.connection.clone(); + let account_ids: Vec = connection + .smembers("accounts") .map_err(|err| error!("Error getting account IDs: {:?}", err)) - .and_then(|(_conn, account_ids): (_, Vec>)| { - let account_ids: Vec = account_ids[0].iter().map(|rid| rid.0).collect(); - Ok(account_ids) - }) + .await?; + Ok(account_ids.iter().map(|rid| rid.0).collect()) } - fn redis_insert_account( - &self, + /// Inserts the account corresponding to the provided `AccountWithEncryptedtokens` + /// in Redis. 
Returns the provided account (tokens remain encrypted) + async fn redis_insert_account( + &mut self, encrypted: AccountWithEncryptedTokens, - ) -> Box + Send> { + ) -> Result { let account = encrypted.account.clone(); let ret = encrypted.clone(); - let connection = self.connection.clone(); + let mut connection = self.connection.clone(); let routing_table = self.routes.clone(); // Check that there isn't already an account with values that MUST be unique let mut pipe = redis_crate::pipe(); @@ -322,153 +342,170 @@ impl RedisStore { pipe.exists(PARENT_ILP_KEY); } - Box::new(pipe.query_async(connection.clone()) + let results: Vec = pipe + .query_async(&mut connection.clone()) .map_err(|err| { - error!("Error checking whether account details already exist: {:?}", err) - }) - .and_then( - move |(connection, results): (RedisReconnect, Vec)| { - if results.iter().any(|val| *val) { - warn!("An account already exists with the same {}. Cannot insert account: {:?}", account.id, account); - Err(()) - } else { - Ok((connection, account)) - } + error!( + "Error checking whether account details already exist: {:?}", + err + ) }) - .and_then(move |(connection, account)| { - let mut pipe = redis_crate::pipe(); - pipe.atomic(); + .await?; + if results.iter().any(|val| *val) { + warn!( + "An account already exists with the same {}. Cannot insert account: {:?}", + account.id, account + ); + return Err(()); + } - // Add the account key to the list of accounts - pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); + let mut pipe = redis_crate::pipe(); + pipe.atomic(); - // Save map for Username -> Account ID - pipe.hset("usernames", account.username().as_ref(), RedisAccountId(account.id)).ignore(); + // Add the account key to the list of accounts + pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); - // Set account details - pipe.cmd("HMSET") - .arg(accounts_key(account.id)) - .arg(encrypted).ignore(); + // Save map for Username -> Account ID + pipe.hset( + "usernames", + account.username().as_ref(), + RedisAccountId(account.id), + ) + .ignore(); - // Set balance-related details - pipe.hset_multiple(accounts_key(account.id), &[("balance", 0), ("prepaid_amount", 0)]).ignore(); + // Set account details + pipe.cmd("HMSET") + .arg(accounts_key(account.id)) + .arg(encrypted) + .ignore(); - if account.should_send_routes() { - pipe.sadd("send_routes_to", RedisAccountId(account.id)).ignore(); - } + // Set balance-related details + pipe.hset_multiple( + accounts_key(account.id), + &[("balance", 0), ("prepaid_amount", 0)], + ) + .ignore(); - if account.should_receive_routes() { - pipe.sadd("receive_routes_from", RedisAccountId(account.id)).ignore(); - } + if account.should_send_routes() { + pipe.sadd("send_routes_to", RedisAccountId(account.id)) + .ignore(); + } - if account.ilp_over_btp_url.is_some() { - pipe.sadd("btp_outgoing", RedisAccountId(account.id)).ignore(); - } + if account.should_receive_routes() { + pipe.sadd("receive_routes_from", RedisAccountId(account.id)) + .ignore(); + } - // Add route to routing table - pipe.hset(ROUTES_KEY, account.ilp_address.to_bytes().to_vec(), RedisAccountId(account.id)) - .ignore(); - - // The parent account settings are done via the API. 
We just - // had to check for the existence of a parent - pipe.query_async(connection) - .map_err(|err| error!("Error inserting account into DB: {:?}", err)) - .and_then(move |(connection, _ret): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - .and_then(move |_| { - debug!("Inserted account {} (ILP address: {})", account.id, account.ilp_address); - Ok(ret) - }) - })) - } - - fn redis_update_account( + if account.ilp_over_btp_url.is_some() { + pipe.sadd("btp_outgoing", RedisAccountId(account.id)) + .ignore(); + } + + // Add route to routing table + pipe.hset( + ROUTES_KEY, + account.ilp_address.to_bytes().to_vec(), + RedisAccountId(account.id), + ) + .ignore(); + + // The parent account settings are done via the API. We just + // had to check for the existence of a parent + pipe.query_async(&mut connection) + .map_err(|err| error!("Error inserting account into DB: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + debug!( + "Inserted account {} (ILP address: {})", + account.id, account.ilp_address + ); + Ok(ret) + } + + /// Overwrites the account corresponding to the provided `AccountWithEncryptedtokens` + /// in Redis. Returns the provided account (tokens remain encrypted) + async fn redis_update_account( &self, encrypted: AccountWithEncryptedTokens, - ) -> Box + Send> { + ) -> Result { let account = encrypted.account.clone(); - let connection = self.connection.clone(); + let mut connection = self.connection.clone(); let routing_table = self.routes.clone(); - Box::new( - // Check to make sure an account with this ID already exists - redis_crate::cmd("EXISTS") - .arg(accounts_key(account.id)) - // TODO this needs to be atomic with the insertions later, - // waiting on #186 - // TODO: Do not allow this update to happen if - // AccountDetails.RoutingRelation == Parent and parent is - // already set - .query_async(connection.clone()) - .map_err(|err| error!("Error checking whether ID exists: {:?}", err)) - .and_then(move |(connection, exists): (RedisReconnect, bool)| { - if !exists { - warn!( - "No account exists with ID {}, cannot update account {:?}", - account.id, account - ); - return Either::A(err(())); - } - let mut pipe = redis_crate::pipe(); - pipe.atomic(); - // Add the account key to the list of accounts - pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); + // Check to make sure an account with this ID already exists + // TODO this needs to be atomic with the insertions later, + // waiting on #186 + // TODO: Do not allow this update to happen if + // AccountDetails.RoutingRelation == Parent and parent is + // already set + let exists: bool = connection + .exists(accounts_key(account.id)) + .map_err(|err| error!("Error checking whether ID exists: {:?}", err)) + .await?; + + if !exists { + warn!( + "No account exists with ID {}, cannot update account {:?}", + account.id, account + ); + return Err(()); + } + let mut pipe = redis_crate::pipe(); + pipe.atomic(); - // Set account details - pipe.cmd("HMSET") - .arg(accounts_key(account.id)) - .arg(encrypted.clone()) - .ignore(); + // Add the account key to the list of accounts + pipe.sadd("accounts", RedisAccountId(account.id)).ignore(); - if account.should_send_routes() { - pipe.sadd("send_routes_to", RedisAccountId(account.id)) - .ignore(); - } + // Set account details + pipe.cmd("HMSET") + .arg(accounts_key(account.id)) + .arg(encrypted.clone()) + .ignore(); - if account.should_receive_routes() { - pipe.sadd("receive_routes_from", RedisAccountId(account.id)) - .ignore(); 
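
The rewritten redis_update_account below replaces the old Either::A(err(())) / Either::B(...) branching with a guard clause and an early return. A small sketch of that shape, with exists and write as hypothetical stand-ins for the Redis existence check and the pipeline write:

    // Hypothetical stand-ins for the Redis existence check and the write.
    async fn exists(id: u32) -> Result<bool, ()> {
        Ok(id != 0)
    }

    async fn write(id: u32) -> Result<(), ()> {
        println!("updated record {}", id);
        Ok(())
    }

    // With async/await the error branch is a plain early return, so the
    // happy path stays unindented and no Either combinator is needed.
    async fn update(id: u32) -> Result<(), ()> {
        if !exists(id).await? {
            eprintln!("no record exists with id {}", id);
            return Err(());
        }
        write(id).await
    }

    #[tokio::main]
    async fn main() {
        assert!(update(7).await.is_ok());
        assert!(update(0).await.is_err());
    }
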
- } + if account.should_send_routes() { + pipe.sadd("send_routes_to", RedisAccountId(account.id)) + .ignore(); + } - if account.ilp_over_btp_url.is_some() { - pipe.sadd("btp_outgoing", RedisAccountId(account.id)) - .ignore(); - } + if account.should_receive_routes() { + pipe.sadd("receive_routes_from", RedisAccountId(account.id)) + .ignore(); + } - // Add route to routing table - pipe.hset( - ROUTES_KEY, - account.ilp_address.to_bytes().to_vec(), - RedisAccountId(account.id), - ) - .ignore(); - - Either::B( - pipe.query_async(connection) - .map_err(|err| error!("Error inserting account into DB: {:?}", err)) - .and_then(move |(connection, _ret): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - .and_then(move |_| { - debug!( - "Inserted account {} (id: {}, ILP address: {})", - account.username, account.id, account.ilp_address - ); - Ok(encrypted) - }), - ) - }), + if account.ilp_over_btp_url.is_some() { + pipe.sadd("btp_outgoing", RedisAccountId(account.id)) + .ignore(); + } + + // Add route to routing table + pipe.hset( + ROUTES_KEY, + account.ilp_address.to_bytes().to_vec(), + RedisAccountId(account.id), ) + .ignore(); + + pipe.query_async(&mut connection) + .map_err(|err| error!("Error inserting account into DB: {:?}", err)) + .await?; + update_routes(connection, routing_table).await?; + debug!( + "Inserted account {} (id: {}, ILP address: {})", + account.username, account.id, account.ilp_address + ); + Ok(encrypted) } - fn redis_modify_account( + /// Modifies the account corresponding to the provided `id` with the provided `settings` + /// in Redis. Returns the modified account (tokens remain encrypted) + async fn redis_modify_account( &self, id: Uuid, settings: EncryptedAccountSettings, - ) -> Box + Send> { + ) -> Result { let connection = self.connection.clone(); - let self_clone = self.clone(); + let mut self_clone = self.clone(); let mut pipe = redis_crate::pipe(); pipe.atomic(); @@ -521,135 +558,116 @@ impl RedisStore { pipe.hset(accounts_key(id), "settle_to", settle_to); } - Box::new( - pipe.query_async(connection.clone()) - .map_err(|err| error!("Error modifying user account: {:?}", err)) - .and_then(move |(_connection, _ret): (RedisReconnect, Value)| { - // return the updated account - self_clone.redis_get_account(id) - }), - ) + pipe.query_async(&mut connection.clone()) + .map_err(|err| error!("Error modifying user account: {:?}", err)) + .await?; + + // return the updated account + self_clone.redis_get_account(id).await } - fn redis_get_account( - &self, - id: Uuid, - ) -> Box + Send> { - Box::new( - LOAD_ACCOUNTS - .arg(id.to_string()) - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error loading accounts: {:?}", err)) - .and_then(|(_, mut accounts): (_, Vec)| { - accounts.pop().ok_or(()) - }), - ) + /// Gets the account (tokens remain encrypted) corresponding to the provided `id` from Redis. + async fn redis_get_account(&mut self, id: Uuid) -> Result { + let mut accounts: Vec = LOAD_ACCOUNTS + .arg(id.to_string()) + .invoke_async(&mut self.connection.clone()) + .map_err(|err| error!("Error loading accounts: {:?}", err)) + .await?; + accounts.pop().ok_or(()) } - fn redis_delete_account( - &self, - id: Uuid, - ) -> Box + Send> { - let connection = self.connection.clone(); + /// Deletes the account corresponding to the provided `id` from Redis. 
+ /// Returns the deleted account (tokens remain encrypted) + async fn redis_delete_account(&mut self, id: Uuid) -> Result { + let mut connection = self.connection.clone(); let routing_table = self.routes.clone(); - Box::new(self.redis_get_account(id).and_then(move |encrypted| { - let account = encrypted.account.clone(); - let mut pipe = redis_crate::pipe(); - pipe.atomic(); - - pipe.srem("accounts", RedisAccountId(account.id)).ignore(); + let encrypted = self.redis_get_account(id).await?; + let account = encrypted.account.clone(); + let mut pipe = redis_crate::pipe(); + pipe.atomic(); - pipe.del(accounts_key(account.id)).ignore(); - pipe.hdel("usernames", account.username().as_ref()).ignore(); + pipe.srem("accounts", RedisAccountId(account.id)).ignore(); - if account.should_send_routes() { - pipe.srem("send_routes_to", RedisAccountId(account.id)) - .ignore(); - } + pipe.del(accounts_key(account.id)).ignore(); + pipe.hdel("usernames", account.username().as_ref()).ignore(); - if account.should_receive_routes() { - pipe.srem("receive_routes_from", RedisAccountId(account.id)) - .ignore(); - } + if account.should_send_routes() { + pipe.srem("send_routes_to", RedisAccountId(account.id)) + .ignore(); + } - if account.ilp_over_btp_url.is_some() { - pipe.srem("btp_outgoing", RedisAccountId(account.id)) - .ignore(); - } + if account.should_receive_routes() { + pipe.srem("receive_routes_from", RedisAccountId(account.id)) + .ignore(); + } - pipe.hdel(ROUTES_KEY, account.ilp_address.to_bytes().to_vec()) + if account.ilp_over_btp_url.is_some() { + pipe.srem("btp_outgoing", RedisAccountId(account.id)) .ignore(); + } - pipe.del(uncredited_amount_key(id)); + pipe.hdel(ROUTES_KEY, account.ilp_address.to_bytes().to_vec()) + .ignore(); - pipe.query_async(connection) - .map_err(|err| error!("Error deleting account from DB: {:?}", err)) - .and_then(move |(connection, _ret): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - .and_then(move |_| { - debug!("Deleted account {}", account.id); - Ok(encrypted) - }) - })) + pipe.del(uncredited_amount_key(id)); + + pipe.query_async(&mut connection) + .map_err(|err| error!("Error deleting account from DB: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + debug!("Deleted account {}", account.id); + Ok(encrypted) } } +#[async_trait] impl AccountStore for RedisStore { type Account = Account; // TODO cache results to avoid hitting Redis for each packet - fn get_accounts( - &self, - account_ids: Vec, - ) -> Box, Error = ()> + Send> { + async fn get_accounts(&self, account_ids: Vec) -> Result, ()> { let decryption_key = self.decryption_key.clone(); let num_accounts = account_ids.len(); let mut script = LOAD_ACCOUNTS.prepare_invoke(); for id in account_ids.iter() { script.arg(id.to_string()); } - Box::new( - script - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error loading accounts: {:?}", err)) - .and_then(move |(_, accounts): (_, Vec)| { - if accounts.len() == num_accounts { - let accounts = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens(&decryption_key.expose_secret().0) - }) - .collect(); - Ok(accounts) - } else { - Err(()) - } - }), - ) + + // Need to clone the connection here to avoid lifetime errors + let connection = self.connection.clone(); + let accounts: Vec = script + .invoke_async(&mut connection.clone()) + .map_err(|err| error!("Error loading accounts: {:?}", err)) + .await?; + + // Decrypt the accounts. 
TODO: This functionality should be + // decoupled from redis so that it gets reused by the other backends + if accounts.len() == num_accounts { + let accounts = accounts + .into_iter() + .map(|account| account.decrypt_tokens(&decryption_key.expose_secret().0)) + .collect(); + Ok(accounts) + } else { + Err(()) + } } - fn get_account_id_from_username( - &self, - username: &Username, - ) -> Box + Send> { + async fn get_account_id_from_username(&self, username: &Username) -> Result { let username = username.clone(); - Box::new( - cmd("HGET") - .arg("usernames") - .arg(username.as_ref()) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error getting account id: {:?}", err)) - .and_then( - move |(_connection, id): (_, Option)| match id { - Some(rid) => Ok(rid.0), - None => { - debug!("Username not found: {}", username); - Err(()) - } - }, - ), - ) + let mut connection = self.connection.clone(); + let id: Option = connection + .hget("usernames", username.as_ref()) + .map_err(move |err| error!("Error getting account id: {:?}", err)) + .await?; + match id { + Some(rid) => Ok(rid.0), + None => { + debug!("Username not found: {}", username); + Err(()) + } + } } } @@ -668,147 +686,146 @@ impl StreamNotificationsStore for RedisStore { fn publish_payment_notification(&self, payment: PaymentNotification) { let username = payment.to_username.clone(); let message = serde_json::to_string(&payment).unwrap(); - let connection = self.connection.clone(); - spawn( - self.get_account_id_from_username(&username) - .map_err(move |_| { + let mut connection = self.connection.clone(); + let self_clone = self.clone(); + tokio::spawn(async move { + let account_id = self_clone + .get_account_id_from_username(&username) + .map_err(|_| { error!( "Failed to find account ID corresponding to username: {}", username ) }) - .and_then(move |account_id| { - debug!( - "Publishing payment notification {} for account {}", - message, account_id - ); - redis_crate::cmd("PUBLISH") - .arg(format!("{}{}", STREAM_NOTIFICATIONS_PREFIX, account_id)) - .arg(message) - .query_async(connection) - .map_err(move |err| error!("Error publish message to Redis: {:?}", err)) - .and_then(move |(_, _): (_, i32)| Ok(())) - }), - ); + .await?; + + debug!( + "Publishing payment notification {} for account {}", + message, account_id + ); + // https://github.com/rust-lang/rust/issues/64960#issuecomment-544219926 + let published_args = format!("{}{}", STREAM_NOTIFICATIONS_PREFIX, account_id.clone()); + redis_crate::cmd("PUBLISH") + .arg(published_args) + .arg(message) + .query_async(&mut connection) + .map_err(move |err| error!("Error publish message to Redis: {:?}", err)) + .await?; + + Ok::<(), ()>(()) + }); } } +#[async_trait] impl BalanceStore for RedisStore { /// Returns the balance **from the account holder's perspective**, meaning the sum of /// the Payable Balance and Pending Outgoing minus the Receivable Balance and the Pending Incoming. 
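
get_balance below is one of the clearest instances of the conversion pattern used throughout this patch: log the error with map_err from futures::future::TryFutureExt, erase it to (), then propagate with .await? instead of chaining and_then. A stand-alone sketch of that pattern, with fallible as a hypothetical failing operation:

    use futures::future::TryFutureExt;

    // Hypothetical async operation that may fail.
    async fn fallible(x: u32) -> Result<u32, String> {
        if x % 2 == 0 {
            Ok(x * 10)
        } else {
            Err(format!("odd input: {}", x))
        }
    }

    async fn run(x: u32) -> Result<u32, ()> {
        // Log the error, erase it to (), then use `?` on the awaited result.
        let value = fallible(x)
            .map_err(|err| eprintln!("operation failed: {}", err))
            .await?;
        Ok(value + 1)
    }

    #[tokio::main]
    async fn main() {
        assert_eq!(run(2).await, Ok(21));
        assert!(run(3).await.is_err());
    }
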
- fn get_balance(&self, account: Account) -> Box + Send> { - Box::new( - cmd("HMGET") - .arg(accounts_key(account.id)) - .arg(&["balance", "prepaid_amount"]) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error getting balance for account: {} {:?}", - account.id, err - ) - }) - .and_then(|(_connection, values): (_, Vec)| { - let balance = values[0]; - let prepaid_amount = values[1]; - Ok(balance + prepaid_amount) - }), - ) + async fn get_balance(&self, account: Account) -> Result { + let mut connection = self.connection.clone(); + let values: Vec = connection + .hget(accounts_key(account.id), &["balance", "prepaid_amount"]) + .map_err(move |err| { + error!( + "Error getting balance for account: {} {:?}", + account.id, err + ) + }) + .await?; + + let balance = values[0]; + let prepaid_amount = values[1]; + Ok(balance + prepaid_amount) } - fn update_balances_for_prepare( + async fn update_balances_for_prepare( &self, from_account: Account, // TODO: Make this take only the id incoming_amount: u64, - ) -> Box + Send> { - if incoming_amount > 0 { - let from_account_id = from_account.id; - Box::new( - PROCESS_PREPARE - .arg(RedisAccountId(from_account_id)) - .arg(incoming_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - warn!( - "Error handling prepare from account: {}: {:?}", - from_account_id, err - ) - }) - .and_then(move |(_connection, balance): (_, i64)| { - trace!( - "Processed prepare with incoming amount: {}. Account {} has balance (including prepaid amount): {} ", - incoming_amount, from_account_id, balance - ); - Ok(()) - }), - ) - } else { - Box::new(ok(())) + ) -> Result<(), ()> { + // Don't do anything if the amount was 0 + if incoming_amount == 0 { + return Ok(()); } + + let from_account_id = from_account.id; + let balance: i64 = PROCESS_PREPARE + .arg(RedisAccountId(from_account_id)) + .arg(incoming_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + warn!( + "Error handling prepare from account: {}: {:?}", + from_account_id, err + ) + }) + .await?; + + trace!( + "Processed prepare with incoming amount: {}. Account {} has balance (including prepaid amount): {} ", + incoming_amount, from_account_id, balance + ); + Ok(()) } - fn update_balances_for_fulfill( + async fn update_balances_for_fulfill( &self, to_account: Account, // TODO: Make this take only the id outgoing_amount: u64, - ) -> Box + Send> { - if outgoing_amount > 0 { - let to_account_id = to_account.id; - Box::new( - PROCESS_FULFILL - .arg(RedisAccountId(to_account_id)) - .arg(outgoing_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error handling Fulfill received from account: {}: {:?}", - to_account_id, err - ) - }) - .and_then(move |(_connection, (balance, amount_to_settle)): (_, (i64, u64))| { - trace!("Processed fulfill for account {} for outgoing amount {}. 
Fulfill call result: {} {}", - to_account_id, - outgoing_amount, - balance, - amount_to_settle, - ); - Ok((balance, amount_to_settle)) - }) - ) - } else { - Box::new(ok((0, 0))) + ) -> Result<(i64, u64), ()> { + if outgoing_amount == 0 { + return Ok((0, 0)); } + let to_account_id = to_account.id; + let (balance, amount_to_settle): (i64, u64) = PROCESS_FULFILL + .arg(RedisAccountId(to_account_id)) + .arg(outgoing_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + error!( + "Error handling Fulfill received from account: {}: {:?}", + to_account_id, err + ) + }) + .await?; + + trace!( + "Processed fulfill for account {} for outgoing amount {}. Fulfill call result: {} {}", + to_account_id, + outgoing_amount, + balance, + amount_to_settle, + ); + Ok((balance, amount_to_settle)) } - fn update_balances_for_reject( + async fn update_balances_for_reject( &self, from_account: Account, // TODO: Make this take only the id incoming_amount: u64, - ) -> Box + Send> { - if incoming_amount > 0 { - let from_account_id = from_account.id; - Box::new( - PROCESS_REJECT - .arg(RedisAccountId(from_account_id)) - .arg(incoming_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - warn!( - "Error handling reject for packet from account: {}: {:?}", - from_account_id, err - ) - }) - .and_then(move |(_connection, balance): (_, i64)| { - trace!( - "Processed reject for incoming amount: {}. Account {} has balance (including prepaid amount): {}", - incoming_amount, from_account_id, balance - ); - Ok(()) - }), - ) - } else { - Box::new(ok(())) + ) -> Result<(), ()> { + if incoming_amount == 0 { + return Ok(()); } + + let from_account_id = from_account.id; + let balance: i64 = PROCESS_REJECT + .arg(RedisAccountId(from_account_id)) + .arg(incoming_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + warn!( + "Error handling reject for packet from account: {}: {:?}", + from_account_id, err + ) + }) + .await?; + + trace!( + "Processed reject for incoming amount: {}. Account {} has balance (including prepaid amount): {}", + incoming_amount, from_account_id, balance + ); + Ok(()) } } @@ -816,11 +833,7 @@ impl ExchangeRateStore for RedisStore { fn get_exchange_rates(&self, asset_codes: &[&str]) -> Result, ()> { let rates: Vec = asset_codes .iter() - .filter_map(|code| { - (*self.exchange_rates.read()) - .get(&code.to_string()) - .cloned() - }) + .filter_map(|code| (*self.exchange_rates.read()).get(*code).cloned()) .collect(); if rates.len() == asset_codes.len() { Ok(rates) @@ -840,145 +853,109 @@ impl ExchangeRateStore for RedisStore { } } +#[async_trait] impl BtpStore for RedisStore { type Account = Account; - fn get_account_from_btp_auth( + async fn get_account_from_btp_auth( &self, username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { // TODO make sure it can't do script injection! // TODO cache the result so we don't hit redis for every packet (is that // necessary if redis is often used as a cache?) 
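
These store impls can only contain async fns because of the #[async_trait] attribute added above each impl block. A minimal sketch of how that attribute is used on a trait and an implementation, with KeyValueStore and MemoryStore as hypothetical names (assumes the async-trait and tokio crates):

    use async_trait::async_trait;

    #[async_trait]
    trait KeyValueStore {
        async fn get(&self, key: String) -> Result<String, ()>;
    }

    struct MemoryStore;

    #[async_trait]
    impl KeyValueStore for MemoryStore {
        // The macro rewrites this into a method returning a boxed future,
        // which is what lets async fns appear in a trait on stable Rust.
        async fn get(&self, key: String) -> Result<String, ()> {
            Ok(format!("value-for-{}", key))
        }
    }

    #[tokio::main]
    async fn main() {
        let store = MemoryStore;
        assert_eq!(
            store.get("balance".to_string()).await,
            Ok("value-for-balance".to_string())
        );
    }
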
let decryption_key = self.decryption_key.clone(); + let mut connection = self.connection.clone(); let token = token.to_owned(); - Box::new( - ACCOUNT_FROM_USERNAME - .arg(username.as_ref()) - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error getting account from BTP token: {:?}", err)) - .and_then( - move |(_connection, account): (_, Option)| { - if let Some(account) = account { - let account = account.decrypt_tokens(&decryption_key.expose_secret().0); - if let Some(t) = account.ilp_over_btp_incoming_token.clone() { - let t = t.expose_secret().clone(); - if t == Bytes::from(token) { - Ok(account) - } else { - debug!( - "Found account {} but BTP auth token was wrong", - account.username - ); - Err(()) - } - } else { - debug!( - "Account {} does not have an incoming btp token configured", - account.username - ); - Err(()) - } - } else { - warn!("No account found with BTP token"); - Err(()) - } - }, - ), - ) + let username = username.to_owned(); // TODO: Can we avoid taking ownership? + + let account: Option = ACCOUNT_FROM_USERNAME + .arg(username.as_ref()) + .invoke_async(&mut connection) + .map_err(|err| error!("Error getting account from BTP token: {:?}", err)) + .await?; + + if let Some(account) = account { + let account = account.decrypt_tokens(&decryption_key.expose_secret().0); + if let Some(t) = account.ilp_over_btp_incoming_token.clone() { + let t = t.expose_secret().clone(); + if t == Bytes::from(token) { + Ok(account) + } else { + debug!( + "Found account {} but BTP auth token was wrong", + account.username + ); + Err(()) + } + } else { + debug!( + "Account {} does not have an incoming btp token configured", + account.username + ); + Err(()) + } + } else { + warn!("No account found with BTP token"); + Err(()) + } } - fn get_btp_outgoing_accounts( - &self, - ) -> Box, Error = ()> + Send> { - let decryption_key = self.decryption_key.clone(); - Box::new( - cmd("SMEMBERS") - .arg("btp_outgoing") - .query_async(self.connection.clone()) - .map_err(|err| error!("Error getting members of set btp_outgoing: {:?}", err)) - .and_then( - move |(connection, account_ids): (RedisReconnect, Vec)| { - if account_ids.is_empty() { - Either::A(ok(Vec::new())) - } else { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - script.arg(id.to_string()); - } - Either::B( - script - .invoke_async(connection.clone()) - .map_err(|err| { - error!( - "Error getting accounts with outgoing BTP details: {:?}", - err - ) - }) - .and_then( - move |(_connection, accounts): ( - RedisReconnect, - Vec, - )| { - let accounts: Vec = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens( - &decryption_key.expose_secret().0, - ) - }) - .collect(); - Ok(accounts) - }, - ), - ) - } - }, - ), - ) + async fn get_btp_outgoing_accounts(&self) -> Result, ()> { + let mut connection = self.connection.clone(); + + let account_ids: Vec = connection + .smembers("btp_outgoing") + .map_err(|err| error!("Error getting members of set btp_outgoing: {:?}", err)) + .await?; + let account_ids: Vec = account_ids.into_iter().map(|id| id.0).collect(); + + if account_ids.is_empty() { + return Ok(Vec::new()); + } + + let accounts = self.get_accounts(account_ids).await?; + Ok(accounts) } } +#[async_trait] impl HttpStore for RedisStore { type Account = Account; /// Checks if the stored token for the provided account id matches the /// provided token, and if so, returns the account associated with that token - fn get_account_from_http_auth( + async fn get_account_from_http_auth( &self, 
username: &Username, token: &str, - ) -> Box + Send> { + ) -> Result { // TODO make sure it can't do script injection! let decryption_key = self.decryption_key.clone(); let token = token.to_owned(); - Box::new( - ACCOUNT_FROM_USERNAME - .arg(username.as_ref()) - .invoke_async(self.connection.clone()) - .map_err(|err| error!("Error getting account from HTTP auth: {:?}", err)) - .and_then( - move |(_connection, account): (_, Option)| { - if let Some(account) = account { - let account = account.decrypt_tokens(&decryption_key.expose_secret().0); - if let Some(t) = account.ilp_over_http_incoming_token.clone() { - let t = t.expose_secret().clone(); - if t == Bytes::from(token) { - Ok(account) - } else { - Err(()) - } - } else { - Err(()) - } - } else { - warn!("No account found with given HTTP auth"); - Err(()) - } - }, - ), - ) + let account: Option = ACCOUNT_FROM_USERNAME + .arg(username.as_ref()) + .invoke_async(&mut self.connection.clone()) + .map_err(|err| error!("Error getting account from HTTP auth: {:?}", err)) + .await?; + + if let Some(account) = account { + let account = account.decrypt_tokens(&decryption_key.expose_secret().0); + if let Some(t) = account.ilp_over_http_incoming_token.clone() { + let t = t.expose_secret().clone(); + if t == Bytes::from(token) { + Ok(account) + } else { + Err(()) + } + } else { + Err(()) + } + } else { + warn!("No account found with given HTTP auth"); + Err(()) + } } } @@ -988,19 +965,14 @@ impl RouterStore for RedisStore { } } +#[async_trait] impl NodeStore for RedisStore { type Account = Account; - fn insert_account( - &self, - account: AccountDetails, - ) -> Box + Send> { + async fn insert_account(&self, account: AccountDetails) -> Result { let encryption_key = self.encryption_key.clone(); let id = Uuid::new_v4(); - let account = match Account::try_from(id, account, self.get_ilp_address()) { - Ok(account) => account, - Err(_) => return Box::new(err(())), - }; + let account = Account::try_from(id, account, self.get_ilp_address())?; debug!( "Generated account id for {}: {}", account.username.clone(), @@ -1009,32 +981,24 @@ impl NodeStore for RedisStore { let encrypted = account .clone() .encrypt_tokens(&encryption_key.expose_secret().0); - Box::new( - self.redis_insert_account(encrypted) - .and_then(move |_| Ok(account)), - ) + let mut self_clone = self.clone(); + + self_clone.redis_insert_account(encrypted).await?; + Ok(account) } - fn delete_account(&self, id: Uuid) -> Box + Send> { + async fn delete_account(&self, id: Uuid) -> Result { let decryption_key = self.decryption_key.clone(); - Box::new( - self.redis_delete_account(id).and_then(move |account| { - Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) - }), - ) + let mut self_clone = self.clone(); + let account = self_clone.redis_delete_account(id).await?; + Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) } - fn update_account( - &self, - id: Uuid, - account: AccountDetails, - ) -> Box + Send> { + async fn update_account(&self, id: Uuid, account: AccountDetails) -> Result { let encryption_key = self.encryption_key.clone(); let decryption_key = self.decryption_key.clone(); - let account = match Account::try_from(id, account, self.get_ilp_address()) { - Ok(account) => account, - Err(_) => return Box::new(err(())), - }; + let account = Account::try_from(id, account, self.get_ilp_address())?; + debug!( "Generated account id for {}: {}", account.username.clone(), @@ -1043,19 +1007,16 @@ impl NodeStore for RedisStore { let encrypted = account .clone() 
.encrypt_tokens(&encryption_key.expose_secret().0); - Box::new( - self.redis_update_account(encrypted) - .and_then(move |account| { - Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) - }), - ) + + let account = self.redis_update_account(encrypted).await?; + Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) } - fn modify_account_settings( + async fn modify_account_settings( &self, id: Uuid, settings: AccountSettings, - ) -> Box + Send> { + ) -> Result { let encryption_key = self.encryption_key.clone(); let decryption_key = self.decryption_key.clone(); let settings = EncryptedAccountSettings { @@ -1068,63 +1029,66 @@ impl NodeStore for RedisStore { &encryption_key.expose_secret().0, token.expose_secret().as_bytes(), ) + .freeze() }), ilp_over_http_incoming_token: settings.ilp_over_http_incoming_token.map(|token| { encrypt_token( &encryption_key.expose_secret().0, token.expose_secret().as_bytes(), ) + .freeze() }), ilp_over_btp_outgoing_token: settings.ilp_over_btp_outgoing_token.map(|token| { encrypt_token( &encryption_key.expose_secret().0, token.expose_secret().as_bytes(), ) + .freeze() }), ilp_over_http_outgoing_token: settings.ilp_over_http_outgoing_token.map(|token| { encrypt_token( &encryption_key.expose_secret().0, token.expose_secret().as_bytes(), ) + .freeze() }), }; - Box::new( - self.redis_modify_account(id, settings) - .and_then(move |account| { - Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) - }), - ) + let account = self.redis_modify_account(id, settings).await?; + Ok(account.decrypt_tokens(&decryption_key.expose_secret().0)) } // TODO limit the number of results and page through them - fn get_all_accounts(&self) -> Box, Error = ()> + Send> { + async fn get_all_accounts(&self) -> Result, ()> { let decryption_key = self.decryption_key.clone(); - let mut pipe = redis_crate::pipe(); - let connection = self.connection.clone(); - pipe.smembers("accounts"); - Box::new(self.get_all_accounts_ids().and_then(move |account_ids| { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - script.arg(id.to_string()); - } - script - .invoke_async(connection.clone()) - .map_err(|err| error!("Error getting account ids: {:?}", err)) - .and_then(move |(_, accounts): (_, Vec)| { - let accounts: Vec = accounts - .into_iter() - .map(|account| account.decrypt_tokens(&decryption_key.expose_secret().0)) - .collect(); - Ok(accounts) - }) - })) + let mut connection = self.connection.clone(); + + let account_ids = self.get_all_accounts_ids().await?; + + let mut script = LOAD_ACCOUNTS.prepare_invoke(); + for id in account_ids.iter() { + script.arg(id.to_string()); + } + + let accounts: Vec = script + .invoke_async(&mut connection) + .map_err(|err| error!("Error getting account ids: {:?}", err)) + .await?; + + // TODO this should be refactored so that it gets reused in multiple backends + let accounts: Vec = accounts + .into_iter() + .map(|account| account.decrypt_tokens(&decryption_key.expose_secret().0)) + .collect(); + + Ok(accounts) } - fn set_static_routes(&self, routes: R) -> Box + Send> + async fn set_static_routes(&self, routes: R) -> Result<(), ()> where - R: IntoIterator, + R: IntoIterator + Send + 'async_trait, { + let mut connection = self.connection.clone(); let routes: Vec<(String, RedisAccountId)> = routes .into_iter() .map(|(s, id)| (s, RedisAccountId(id))) @@ -1137,246 +1101,228 @@ impl NodeStore for RedisStore { } let routing_table = self.routes.clone(); - Box::new(pipe.query_async(self.connection.clone()) - 
.map_err(|err| error!("Error checking if accounts exist while setting static routes: {:?}", err)) - .and_then(|(connection, accounts_exist): (RedisReconnect, Vec)| { - if accounts_exist.iter().all(|a| *a) { - Ok(connection) - } else { - error!("Error setting static routes because not all of the given accounts exist"); - Err(()) - } + + let accounts_exist: Vec = pipe + .query_async(&mut connection) + .map_err(|err| { + error!( + "Error checking if accounts exist while setting static routes: {:?}", + err + ) }) - .and_then(move |connection| { + .await?; + + if !accounts_exist.iter().all(|a| *a) { + error!("Error setting static routes because not all of the given accounts exist"); + return Err(()); + } + let mut pipe = redis_crate::pipe(); pipe.atomic() .del(STATIC_ROUTES_KEY) .ignore() .hset_multiple(STATIC_ROUTES_KEY, &routes) .ignore(); - pipe.query_async(connection) - .map_err(|err| error!("Error setting static routes: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - })) + + pipe.query_async(&mut connection) + .map_err(|err| error!("Error setting static routes: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + Ok(()) } - fn set_static_route( - &self, - prefix: String, - account_id: Uuid, - ) -> Box + Send> { + async fn set_static_route(&self, prefix: String, account_id: Uuid) -> Result<(), ()> { let routing_table = self.routes.clone(); let prefix_clone = prefix.clone(); - Box::new( - cmd("EXISTS") - .arg(accounts_key(account_id)) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error checking if account exists before setting static route: {:?}", err)) - .and_then(move |(connection, exists): (RedisReconnect, bool)| { - if exists { - Ok(connection) - } else { - error!("Cannot set static route for prefix: {} because account {} does not exist", prefix_clone, account_id); - Err(()) - } - }) - .and_then(move |connection| { - cmd("HSET") - .arg(STATIC_ROUTES_KEY) - .arg(prefix) - .arg(RedisAccountId(account_id)) - .query_async(connection) - .map_err(|err| error!("Error setting static route: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) + let mut connection = self.connection.clone(); + + let exists: bool = connection + .exists(accounts_key(account_id)) + .map_err(|err| { + error!( + "Error checking if account exists before setting static route: {:?}", + err + ) }) - ) + .await?; + if !exists { + error!( + "Cannot set static route for prefix: {} because account {} does not exist", + prefix_clone, account_id + ); + return Err(()); + } + + connection + .hset(STATIC_ROUTES_KEY, prefix, RedisAccountId(account_id)) + .map_err(|err| error!("Error setting static route: {:?}", err)) + .await?; + + update_routes(connection, routing_table).await?; + + Ok(()) } - fn set_default_route(&self, account_id: Uuid) -> Box + Send> { + async fn set_default_route(&self, account_id: Uuid) -> Result<(), ()> { let routing_table = self.routes.clone(); // TODO replace this with a lua script to do both calls at once - Box::new( - cmd("EXISTS") - .arg(accounts_key(account_id)) - .query_async(self.connection.clone()) - .map_err(|err| { - error!( - "Error checking if account exists before setting default route: {:?}", - err - ) - }) - .and_then(move |(connection, exists): (RedisReconnect, bool)| { - if exists { - Ok(connection) - } else { - error!( - "Cannot set default route because account {} does not exist", - account_id - ); - 
Err(()) - } - }) - .and_then(move |connection| { - cmd("SET") - .arg(DEFAULT_ROUTE_KEY) - .arg(RedisAccountId(account_id)) - .query_async(connection) - .map_err(|err| error!("Error setting default route: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - debug!("Set default route to account id: {}", account_id); - update_routes(connection, routing_table) - }) - }), - ) + let mut connection = self.connection.clone(); + let exists: bool = connection + .exists(accounts_key(account_id)) + .map_err(|err| { + error!( + "Error checking if account exists before setting default route: {:?}", + err + ) + }) + .await?; + if !exists { + error!( + "Cannot set default route because account {} does not exist", + account_id + ); + return Err(()); + } + + connection + .set(DEFAULT_ROUTE_KEY, RedisAccountId(account_id)) + .map_err(|err| error!("Error setting default route: {:?}", err)) + .await?; + debug!("Set default route to account id: {}", account_id); + update_routes(connection, routing_table).await?; + Ok(()) } - fn set_settlement_engines( + async fn set_settlement_engines( &self, - asset_to_url_map: impl IntoIterator, - ) -> Box + Send> { + asset_to_url_map: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { + let mut connection = self.connection.clone(); let asset_to_url_map: Vec<(String, String)> = asset_to_url_map .into_iter() .map(|(asset_code, url)| (asset_code, url.to_string())) .collect(); debug!("Setting settlement engines to {:?}", asset_to_url_map); - Box::new( - cmd("HMSET") - .arg(SETTLEMENT_ENGINES_KEY) - .arg(asset_to_url_map) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error setting settlement engines: {:?}", err)) - .and_then(|(_, _): (RedisReconnect, Value)| Ok(())), - ) + connection + .hset_multiple(SETTLEMENT_ENGINES_KEY, &asset_to_url_map) + .map_err(|err| error!("Error setting settlement engines: {:?}", err)) + .await?; + Ok(()) } - fn get_asset_settlement_engine( - &self, - asset_code: &str, - ) -> Box, Error = ()> + Send> { - Box::new( - cmd("HGET") - .arg(SETTLEMENT_ENGINES_KEY) - .arg(asset_code) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error getting settlement engine: {:?}", err)) - .map(|(_, url): (_, Option)| { - if let Some(url) = url { - Url::parse(url.as_str()) - .map_err(|err| { - error!( - "Settlement engine URL loaded from Redis was not a valid URL: {:?}", - err - ) - }) - .ok() - } else { - None - } - }), - ) + async fn get_asset_settlement_engine(&self, asset_code: &str) -> Result, ()> { + let mut connection = self.connection.clone(); + let asset_code = asset_code.to_owned(); + + let url: Option = connection + .hget(SETTLEMENT_ENGINES_KEY, asset_code) + .map_err(|err| error!("Error getting settlement engine: {:?}", err)) + .await?; + if let Some(url) = url { + match Url::parse(url.as_str()) { + Ok(url) => Ok(Some(url)), + Err(err) => { + error!( + "Settlement engine URL loaded from Redis was not a valid URL: {:?}", + err + ); + Err(()) + } + } + } else { + Ok(None) + } } } +#[async_trait] impl AddressStore for RedisStore { // Updates the ILP address of the store & iterates over all children and // updates their ILP Address to match the new address. 
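
The address rewriting implemented below keeps a child's address unchanged when the node's address already ends with that child's username, and otherwise appends the username as a new segment. A tiny sketch of that rule over plain strings (the real code operates on interledger_packet::Address via with_suffix); child_address is a hypothetical helper:

    // Hypothetical helper mirroring the rewriting rule below, over plain strings.
    fn child_address(node_address: &str, username: &str) -> String {
        if node_address.rsplit('.').next() == Some(username) {
            // The node address already ends with the username, so this is the
            // node's own non-routing account and the address stays as-is.
            node_address.to_string()
        } else {
            format!("{}.{}", node_address, username)
        }
    }

    fn main() {
        assert_eq!(child_address("example.node", "alice"), "example.node.alice");
        assert_eq!(child_address("example.node.alice", "alice"), "example.node.alice");
    }
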
- fn set_ilp_address( - &self, - ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, ilp_address: Address) -> Result<(), ()> { debug!("Setting ILP address to: {}", ilp_address); let routing_table = self.routes.clone(); - let connection = self.connection.clone(); + let mut connection = self.connection.clone(); let ilp_address_clone = ilp_address.clone(); // Set the ILP address we have in memory (*self.ilp_address.write()) = ilp_address.clone(); // Save it to Redis - Box::new( - cmd("SET") - .arg(PARENT_ILP_KEY) - .arg(ilp_address.as_bytes()) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error setting ILP address {:?}", err)) - .and_then(move |(_, _): (RedisReconnect, Value)| Ok(())) - .join(self.get_all_accounts().and_then(move |accounts| { - // TODO: This can be an expensive operation if this function - // gets called often. This currently only gets called when - // inserting a new parent account in the API. It'd be nice - // if we could generate a child's ILP address on the fly, - // instead of having to store the username appended to the - // node's ilp address. Currently this is not possible, as - // account.ilp_address() cannot access any state that exists - // on the store. - let mut pipe = redis_crate::pipe(); - for account in accounts { - // Update the address and routes of all children and non-routing accounts. - if account.routing_relation() != RoutingRelation::Parent - && account.routing_relation() != RoutingRelation::Peer - { - // remove the old route - pipe.hdel(ROUTES_KEY, &account.ilp_address as &str).ignore(); - - // if the username of the account ends with the - // node's address, we're already configured so no - // need to append anything. - let ilp_address_clone2 = ilp_address_clone.clone(); - // Note: We are assuming that if the node's address - // ends with the account's username, then this - // account represents the node's non routing - // account. Is this a reasonable assumption to make? - let new_ilp_address = - if ilp_address_clone2.segments().rev().next().unwrap() - == account.username().to_string() - { - ilp_address_clone2 - } else { - ilp_address_clone - .with_suffix(account.username().as_bytes()) - .unwrap() - }; - pipe.hset( - accounts_key(account.id()), - "ilp_address", - new_ilp_address.as_bytes(), - ) - .ignore(); - - pipe.hset( - ROUTES_KEY, - new_ilp_address.as_bytes(), - RedisAccountId(account.id()), - ) - .ignore(); - } - } - pipe.query_async(connection.clone()) - .map_err(|err| error!("Error updating children: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - update_routes(connection, routing_table) - }) - })) - .and_then(move |_| Ok(())), - ) + connection + .set(PARENT_ILP_KEY, ilp_address.as_bytes()) + .map_err(|err| error!("Error setting ILP address {:?}", err)) + .await?; + + let accounts = self.get_all_accounts().await?; + // TODO: This can be an expensive operation if this function + // gets called often. This currently only gets called when + // inserting a new parent account in the API. It'd be nice + // if we could generate a child's ILP address on the fly, + // instead of having to store the username appended to the + // node's ilp address. Currently this is not possible, as + // account.ilp_address() cannot access any state that exists + // on the store. + let mut pipe = redis_crate::pipe(); + for account in accounts { + // Update the address and routes of all children and non-routing accounts. 
+ if account.routing_relation() != RoutingRelation::Parent + && account.routing_relation() != RoutingRelation::Peer + { + // remove the old route + pipe.hdel(ROUTES_KEY, &account.ilp_address as &str).ignore(); + + // if the username of the account ends with the + // node's address, we're already configured so no + // need to append anything. + let ilp_address_clone2 = ilp_address_clone.clone(); + // Note: We are assuming that if the node's address + // ends with the account's username, then this + // account represents the node's non routing + // account. Is this a reasonable assumption to make? + let new_ilp_address = if ilp_address_clone2.segments().rev().next().unwrap() + == account.username().to_string() + { + ilp_address_clone2 + } else { + ilp_address_clone + .with_suffix(account.username().as_bytes()) + .unwrap() + }; + pipe.hset( + accounts_key(account.id()), + "ilp_address", + new_ilp_address.as_bytes(), + ) + .ignore(); + + pipe.hset( + ROUTES_KEY, + new_ilp_address.as_bytes(), + RedisAccountId(account.id()), + ) + .ignore(); + } + } + + pipe.query_async(&mut connection.clone()) + .map_err(|err| error!("Error updating children: {:?}", err)) + .await?; + update_routes(connection, routing_table).await?; + Ok(()) } - fn clear_ilp_address(&self) -> Box + Send> { - let self_clone = self.clone(); - Box::new( - cmd("DEL") - .arg(PARENT_ILP_KEY) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error removing parent address: {:?}", err)) - .and_then(move |(_, _): (RedisReconnect, Value)| { - *(self_clone.ilp_address.write()) = DEFAULT_ILP_ADDRESS.clone(); - Ok(()) - }), - ) + async fn clear_ilp_address(&self) -> Result<(), ()> { + let mut connection = self.connection.clone(); + connection + .del(PARENT_ILP_KEY) + .map_err(|err| error!("Error removing parent address: {:?}", err)) + .await?; + + // overwrite the ilp address with the default value + *(self.ilp_address.write()) = DEFAULT_ILP_ADDRESS.clone(); + Ok(()) } fn get_ilp_address(&self) -> Address { @@ -1387,163 +1333,102 @@ impl AddressStore for RedisStore { type RoutingTable = HashMap; +#[async_trait] impl RouteManagerStore for RedisStore { type Account = Account; - fn get_accounts_to_send_routes_to( + async fn get_accounts_to_send_routes_to( &self, ignore_accounts: Vec, - ) -> Box, Error = ()> + Send> { - let decryption_key = self.decryption_key.clone(); - Box::new( - cmd("SMEMBERS") - .arg("send_routes_to") - .query_async(self.connection.clone()) - .map_err(|err| error!("Error getting members of set send_routes_to: {:?}", err)) - .and_then( - move |(connection, account_ids): (RedisReconnect, Vec)| { - if account_ids.is_empty() { - Either::A(ok(Vec::new())) - } else { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - if !ignore_accounts.contains(&id.0) { - script.arg(id.to_string()); - } - } - Either::B( - script - .invoke_async(connection.clone()) - .map_err(|err| { - error!( - "Error getting accounts to send routes to: {:?}", - err - ) - }) - .and_then( - move |(_connection, accounts): ( - RedisReconnect, - Vec, - )| { - let accounts: Vec = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens( - &decryption_key.expose_secret().0, - ) - }) - .collect(); - Ok(accounts) - }, - ), - ) - } - }, - ), - ) + ) -> Result, ()> { + let mut connection = self.connection.clone(); + + let account_ids: Vec = connection + .smembers("send_routes_to") + .map_err(|err| error!("Error getting members of set send_routes_to: {:?}", err)) + .await?; + let account_ids: Vec = account_ids + 
.into_iter() + .map(|id| id.0) + .filter(|id| !ignore_accounts.contains(&id)) + .collect(); + if account_ids.is_empty() { + return Ok(Vec::new()); + } + + let accounts = self.get_accounts(account_ids).await?; + Ok(accounts) } - fn get_accounts_to_receive_routes_from( - &self, - ) -> Box, Error = ()> + Send> { - let decryption_key = self.decryption_key.clone(); - Box::new( - cmd("SMEMBERS") - .arg("receive_routes_from") - .query_async(self.connection.clone()) - .map_err(|err| { - error!( - "Error getting members of set receive_routes_from: {:?}", - err - ) - }) - .and_then( - |(connection, account_ids): (RedisReconnect, Vec)| { - if account_ids.is_empty() { - Either::A(ok(Vec::new())) - } else { - let mut script = LOAD_ACCOUNTS.prepare_invoke(); - for id in account_ids.iter() { - script.arg(id.to_string()); - } - Either::B( - script - .invoke_async(connection.clone()) - .map_err(|err| { - error!( - "Error getting accounts to receive routes from: {:?}", - err - ) - }) - .and_then( - move |(_connection, accounts): ( - RedisReconnect, - Vec, - )| { - let accounts: Vec = accounts - .into_iter() - .map(|account| { - account.decrypt_tokens( - &decryption_key.expose_secret().0, - ) - }) - .collect(); - Ok(accounts) - }, - ), - ) - } - }, - ), - ) + async fn get_accounts_to_receive_routes_from(&self) -> Result, ()> { + let mut connection = self.connection.clone(); + let account_ids: Vec = connection + .smembers("receive_routes_from") + .map_err(|err| { + error!( + "Error getting members of set receive_routes_from: {:?}", + err + ) + }) + .await?; + let account_ids: Vec = account_ids.into_iter().map(|id| id.0).collect(); + + if account_ids.is_empty() { + return Ok(Vec::new()); + } + + let accounts = self.get_accounts(account_ids).await?; + Ok(accounts) } - fn get_local_and_configured_routes( + async fn get_local_and_configured_routes( &self, - ) -> Box, RoutingTable), Error = ()> + Send> - { - let get_static_routes = cmd("HGETALL") - .arg(STATIC_ROUTES_KEY) - .query_async(self.connection.clone()) + ) -> Result<(RoutingTable, RoutingTable), ()> { + let mut connection = self.connection.clone(); + let static_routes: Vec<(String, RedisAccountId)> = connection + .hgetall(STATIC_ROUTES_KEY) .map_err(|err| error!("Error getting static routes: {:?}", err)) - .and_then( - |(_, static_routes): (RedisReconnect, Vec<(String, RedisAccountId)>)| { - Ok(static_routes) - }, - ); - Box::new(self.get_all_accounts().join(get_static_routes).and_then( - |(accounts, static_routes)| { - let local_table = HashMap::from_iter( - accounts - .iter() - .map(|account| (account.ilp_address.to_string(), account.clone())), - ); + .await?; - let account_map: HashMap = HashMap::from_iter(accounts.iter().map(|account| (account.id, account))); - let configured_table: HashMap = HashMap::from_iter(static_routes.into_iter() - .filter_map(|(prefix, account_id)| { - if let Some(account) = account_map.get(&account_id.0) { - Some((prefix, (*account).clone())) - } else { - warn!("No account for ID: {}, ignoring configured route for prefix: {}", account_id, prefix); - None - } - })); + let accounts = self.get_all_accounts().await?; - Ok((local_table, configured_table)) - }, - )) + let local_table = HashMap::from_iter( + accounts + .iter() + .map(|account| (account.ilp_address.to_string(), account.clone())), + ); + + let account_map: HashMap = + HashMap::from_iter(accounts.iter().map(|account| (account.id, account))); + let configured_table: HashMap = HashMap::from_iter( + static_routes + .into_iter() + .filter_map(|(prefix, account_id)| { + if 
let Some(account) = account_map.get(&account_id.0) { + Some((prefix, (*account).clone())) + } else { + warn!( + "No account for ID: {}, ignoring configured route for prefix: {}", + account_id, prefix + ); + None + } + }), + ); + + Ok((local_table, configured_table)) } - fn set_routes( + async fn set_routes( &mut self, - routes: impl IntoIterator, - ) -> Box + Send> { + routes: impl IntoIterator + Send + 'async_trait, + ) -> Result<(), ()> { let routes: Vec<(String, RedisAccountId)> = routes .into_iter() .map(|(prefix, account)| (prefix, RedisAccountId(account.id))) .collect(); let num_routes = routes.len(); + let mut connection = self.connection.clone(); // Save routes to Redis let routing_tale = self.routes.clone(); @@ -1553,28 +1438,28 @@ impl RouteManagerStore for RedisStore { .ignore() .hset_multiple(ROUTES_KEY, &routes) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error setting routes: {:?}", err)) - .and_then(move |(connection, _): (RedisReconnect, Value)| { - trace!("Saved {} routes to Redis", num_routes); - update_routes(connection, routing_tale) - }), - ) + + pipe.query_async(&mut connection) + .map_err(|err| error!("Error setting routes: {:?}", err)) + .await?; + trace!("Saved {} routes to Redis", num_routes); + + update_routes(connection, routing_tale).await } } +#[async_trait] impl RateLimitStore for RedisStore { type Account = Account; /// Apply rate limits for number of packets per minute and amount of money per minute /// /// This uses https://github.com/brandur/redis-cell so the redis-cell module MUST be loaded into redis before this is run - fn apply_rate_limits( + async fn apply_rate_limits( &self, account: Account, prepare_amount: u64, - ) -> Box + Send> { + ) -> Result<(), RateLimitError> { if account.amount_per_minute_limit.is_some() || account.packets_per_minute_limit.is_some() { let mut pipe = redis_crate::pipe(); let packet_limit = account.packets_per_minute_limit.is_some(); @@ -1582,8 +1467,9 @@ impl RateLimitStore for RedisStore { if let Some(limit) = account.packets_per_minute_limit { let limit = limit - 1; + let packets_limit = format!("limit:packets:{}", account.id); pipe.cmd("CL.THROTTLE") - .arg(format!("limit:packets:{}", account.id)) + .arg(packets_limit) .arg(limit) .arg(limit) .arg(60) @@ -1592,113 +1478,115 @@ impl RateLimitStore for RedisStore { if let Some(limit) = account.amount_per_minute_limit { let limit = limit - 1; + let throughput_limit = format!("limit:throughput:{}", account.id); pipe.cmd("CL.THROTTLE") - .arg(format!("limit:throughput:{}", account.id)) + .arg(throughput_limit) // TODO allow separate configuration for burst limit .arg(limit) .arg(limit) .arg(60) .arg(prepare_amount); } - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| { - error!("Error applying rate limits: {:?}", err); - RateLimitError::StoreError - }) - .and_then(move |(_, results): (_, Vec>)| { - if packet_limit && amount_limit { - if results[0][0] == 1 { - Err(RateLimitError::PacketLimitExceeded) - } else if results[1][0] == 1 { - Err(RateLimitError::ThroughputLimitExceeded) - } else { - Ok(()) - } - } else if packet_limit && results[0][0] == 1 { - Err(RateLimitError::PacketLimitExceeded) - } else if amount_limit && results[0][0] == 1 { - Err(RateLimitError::ThroughputLimitExceeded) - } else { - Ok(()) - } - }), - ) + + let mut connection = self.connection.clone(); + let results: Vec> = pipe + .query_async(&mut connection) + .map_err(|err| { + error!("Error applying rate limits: {:?}", err); + 
RateLimitError::StoreError + }) + .await?; + + if packet_limit && amount_limit { + if results[0][0] == 1 { + Err(RateLimitError::PacketLimitExceeded) + } else if results[1][0] == 1 { + Err(RateLimitError::ThroughputLimitExceeded) + } else { + Ok(()) + } + } else if packet_limit && results[0][0] == 1 { + Err(RateLimitError::PacketLimitExceeded) + } else if amount_limit && results[0][0] == 1 { + Err(RateLimitError::ThroughputLimitExceeded) + } else { + Ok(()) + } } else { - Box::new(ok(())) + Ok(()) } } - fn refund_throughput_limit( + async fn refund_throughput_limit( &self, account: Account, prepare_amount: u64, - ) -> Box + Send> { + ) -> Result<(), ()> { if let Some(limit) = account.amount_per_minute_limit { + let mut connection = self.connection.clone(); let limit = limit - 1; - Box::new( - cmd("CL.THROTTLE") - .arg(format!("limit:throughput:{}", account.id)) - .arg(limit) - .arg(limit) - .arg(60) - // TODO make sure this doesn't overflow - .arg(0i64 - (prepare_amount as i64)) - .query_async(self.connection.clone()) - .map_err(|err| error!("Error refunding throughput limit: {:?}", err)) - .and_then(|(_, _): (_, Value)| Ok(())), - ) - } else { - Box::new(ok(())) + let throughput_limit = format!("limit:throughput:{}", account.id); + cmd("CL.THROTTLE") + .arg(throughput_limit) + .arg(limit) + .arg(limit) + .arg(60) + // TODO make sure this doesn't overflow + .arg(0i64 - (prepare_amount as i64)) + .query_async(&mut connection) + .map_err(|err| error!("Error refunding throughput limit: {:?}", err)) + .await?; } + + Ok(()) } } +#[async_trait] impl IdempotentStore for RedisStore { - fn load_idempotent_data( + async fn load_idempotent_data( &self, idempotency_key: String, - ) -> Box, Error = ()> + Send> { + ) -> Result, ()> { let idempotency_key_clone = idempotency_key.clone(); - Box::new( - cmd("HGETALL") - .arg(prefixed_idempotency_key(idempotency_key.clone())) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error loading idempotency key {}: {:?}", - idempotency_key_clone, err - ) - }) - .and_then(move |(_connection, ret): (_, HashMap)| { - if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( - ret.get("status_code"), - ret.get("data"), - ret.get("input_hash"), - ) { - trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); - let mut input_hash: [u8; 32] = Default::default(); - input_hash.copy_from_slice(input_hash_slice.as_ref()); - Ok(Some(IdempotentData::new( - StatusCode::from_str(status_code).unwrap(), - Bytes::from(data.clone()), - input_hash, - ))) - } else { - Ok(None) - } - }), - ) + let mut connection = self.connection.clone(); + let ret: HashMap = connection + .hgetall(prefixed_idempotency_key(idempotency_key.clone())) + .map_err(move |err| { + error!( + "Error loading idempotency key {}: {:?}", + idempotency_key_clone, err + ) + }) + .await?; + + if let (Some(status_code), Some(data), Some(input_hash_slice)) = ( + ret.get("status_code"), + ret.get("data"), + ret.get("input_hash"), + ) { + trace!("Loaded idempotency key {:?} - {:?}", idempotency_key, ret); + let mut input_hash: [u8; 32] = Default::default(); + input_hash.copy_from_slice(input_hash_slice.as_ref()); + Ok(Some(IdempotentData::new( + StatusCode::from_str(status_code).unwrap(), + Bytes::from(data.clone()), + input_hash, + ))) + } else { + Ok(None) + } } - fn save_idempotent_data( + async fn save_idempotent_data( &self, idempotency_key: String, input_hash: [u8; 32], status_code: StatusCode, data: Bytes, - ) -> Box + Send> { + ) -> Result<(), ()> { let mut pipe = 
redis_crate::pipe(); + let mut connection = self.connection.clone(); pipe.atomic() .cmd("HMSET") // cannot use hset_multiple since data and status_code have different types .arg(&prefixed_idempotency_key(idempotency_key.clone())) @@ -1711,79 +1599,83 @@ impl IdempotentStore for RedisStore { .ignore() .expire(&prefixed_idempotency_key(idempotency_key.clone()), 86400) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(|err| error!("Error caching: {:?}", err)) - .and_then(move |(_connection, _): (_, Vec)| { - trace!( - "Cached {:?}: {:?}, {:?}", - idempotency_key, - status_code, - data, - ); - Ok(()) - }), - ) + pipe.query_async(&mut connection) + .map_err(|err| error!("Error caching: {:?}", err)) + .await?; + + trace!( + "Cached {:?}: {:?}, {:?}", + idempotency_key, + status_code, + data, + ); + Ok(()) } } +#[async_trait] impl SettlementStore for RedisStore { type Account = Account; - fn update_balance_for_incoming_settlement( + async fn update_balance_for_incoming_settlement( &self, account_id: Uuid, amount: u64, idempotency_key: Option, - ) -> Box + Send> { + ) -> Result<(), ()> { let idempotency_key = idempotency_key.unwrap(); - Box::new( - PROCESS_INCOMING_SETTLEMENT + let balance: i64 = PROCESS_INCOMING_SETTLEMENT .arg(RedisAccountId(account_id)) .arg(amount) .arg(idempotency_key) - .invoke_async(self.connection.clone()) - .map_err(move |err| error!("Error processing incoming settlement from account: {} for amount: {}: {:?}", account_id, amount, err)) - .and_then(move |(_connection, balance): (_, i64)| { - trace!("Processed incoming settlement from account: {} for amount: {}. Balance is now: {}", account_id, amount, balance); - Ok(()) - })) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + error!( + "Error processing incoming settlement from account: {} for amount: {}: {:?}", + account_id, amount, err + ) + }) + .await?; + trace!( + "Processed incoming settlement from account: {} for amount: {}. Balance is now: {}", + account_id, + amount, + balance + ); + Ok(()) } - fn refund_settlement( - &self, - account_id: Uuid, - settle_amount: u64, - ) -> Box + Send> { + async fn refund_settlement(&self, account_id: Uuid, settle_amount: u64) -> Result<(), ()> { trace!( "Refunding settlement for account: {} of amount: {}", account_id, settle_amount ); - Box::new( - REFUND_SETTLEMENT - .arg(RedisAccountId(account_id)) - .arg(settle_amount) - .invoke_async(self.connection.clone()) - .map_err(move |err| { - error!( - "Error refunding settlement for account: {} of amount: {}: {:?}", - account_id, settle_amount, err - ) - }) - .and_then(move |(_connection, balance): (_, i64)| { - trace!( - "Refunded settlement for account: {} of amount: {}. Balance is now: {}", - account_id, - settle_amount, - balance - ); - Ok(()) - }), - ) + let balance: i64 = REFUND_SETTLEMENT + .arg(RedisAccountId(account_id)) + .arg(settle_amount) + .invoke_async(&mut self.connection.clone()) + .map_err(move |err| { + error!( + "Error refunding settlement for account: {} of amount: {}: {:?}", + account_id, settle_amount, err + ) + }) + .await?; + + trace!( + "Refunded settlement for account: {} of amount: {}. Balance is now: {}", + account_id, + settle_amount, + balance + ); + Ok(()) } } +// TODO: AmountWithScale is re-implemented on Interledger-Settlement. 
It'd be nice +// if we could deduplicate this by extracting it to a separate crate which would make +// logical sense #[derive(Debug, Clone)] struct AmountWithScale { num: BigUint, @@ -1870,155 +1762,144 @@ impl FromRedisValue for AmountWithScale { } } +#[async_trait] impl LeftoversStore for RedisStore { type AccountId = Uuid; type AssetType = BigUint; - fn get_uncredited_settlement_amount( + async fn get_uncredited_settlement_amount( &self, account_id: Uuid, - ) -> Box + Send> { + ) -> Result<(Self::AssetType, u8), ()> { let mut pipe = redis_crate::pipe(); pipe.atomic(); // get the amounts and instantly delete them pipe.lrange(uncredited_amount_key(account_id.to_string()), 0, -1); pipe.del(uncredited_amount_key(account_id.to_string())) .ignore(); - Box::new( - pipe.query_async(self.connection.clone()) - .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) - .and_then(move |(_, amounts): (_, Vec)| { - // this call will only return 1 element - let amount = amounts[0].clone(); - Ok((amount.num, amount.scale)) - }), - ) + + let amounts: Vec = pipe + .query_async(&mut self.connection.clone()) + .map_err(move |err| error!("Error getting uncredited_settlement_amount {:?}", err)) + .await?; + + // this call will only return 1 element + let amount = amounts[0].clone(); + Ok((amount.num, amount.scale)) } - fn save_uncredited_settlement_amount( + async fn save_uncredited_settlement_amount( &self, account_id: Uuid, uncredited_settlement_amount: (Self::AssetType, u8), - ) -> Box + Send> { + ) -> Result<(), ()> { trace!( "Saving uncredited_settlement_amount {:?} {:?}", account_id, uncredited_settlement_amount ); - Box::new( - // We store these amounts as lists of strings - // because we cannot do BigNumber arithmetic in the store - // When loading the amounts, we convert them to the appropriate data - // type and sum them up. - cmd("RPUSH") - .arg(uncredited_amount_key(account_id)) - .arg(AmountWithScale { + // We store these amounts as lists of strings + // because we cannot do BigNumber arithmetic in the store + // When loading the amounts, we convert them to the appropriate data + // type and sum them up. 
+ let mut connection = self.connection.clone(); + connection + .rpush( + uncredited_amount_key(account_id), + AmountWithScale { num: uncredited_settlement_amount.0, scale: uncredited_settlement_amount.1, - }) - .query_async(self.connection.clone()) - .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + }, + ) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + + Ok(()) } - fn load_uncredited_settlement_amount( + async fn load_uncredited_settlement_amount( &self, account_id: Uuid, local_scale: u8, - ) -> Box + Send> { - let connection = self.connection.clone(); + ) -> Result { + let mut connection = self.connection.clone(); trace!("Loading uncredited_settlement_amount {:?}", account_id); - Box::new( - self.get_uncredited_settlement_amount(account_id) - .and_then(move |amount| { - // scale the amount from the max scale to the local scale, and then - // save any potential leftovers to the store - let (scaled_amount, precision_loss) = - scale_with_precision_loss(amount.0, local_scale, amount.1); - if precision_loss > BigUint::from(0u32) { - Either::A( - cmd("RPUSH") - .arg(uncredited_amount_key(account_id)) - .arg(AmountWithScale { - num: precision_loss, - scale: std::cmp::max(local_scale, amount.1), - }) - .query_async(connection.clone()) - .map_err(move |err| { - error!("Error saving uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(scaled_amount)), - ) - } else { - Either::B(ok(scaled_amount)) - } - }), - ) + let amount = self.get_uncredited_settlement_amount(account_id).await?; + // scale the amount from the max scale to the local scale, and then + // save any potential leftovers to the store + let (scaled_amount, precision_loss) = + scale_with_precision_loss(amount.0, local_scale, amount.1); + + if precision_loss > BigUint::from(0u32) { + connection + .rpush( + uncredited_amount_key(account_id), + AmountWithScale { + num: precision_loss, + scale: std::cmp::max(local_scale, amount.1), + }, + ) + .map_err(move |err| error!("Error saving uncredited_settlement_amount: {:?}", err)) + .await?; + } + + Ok(scaled_amount) } - fn clear_uncredited_settlement_amount( - &self, - account_id: Uuid, - ) -> Box + Send> { - trace!("Clearing uncredited_settlement_amount {:?}", account_id,); - Box::new( - cmd("DEL") - .arg(uncredited_amount_key(account_id)) - .query_async(self.connection.clone()) - .map_err(move |err| { - error!("Error clearing uncredited_settlement_amount: {:?}", err) - }) - .and_then(move |(_conn, _ret): (_, Value)| Ok(())), - ) + async fn clear_uncredited_settlement_amount(&self, account_id: Uuid) -> Result<(), ()> { + trace!("Clearing uncredited_settlement_amount {:?}", account_id); + let mut connection = self.connection.clone(); + connection + .del(uncredited_amount_key(account_id)) + .map_err(move |err| error!("Error clearing uncredited_settlement_amount: {:?}", err)) + .await?; + Ok(()) } } type RouteVec = Vec<(String, RedisAccountId)>; +use futures::future::TryFutureExt; + // TODO replace this with pubsub when async pubsub is added upstream: https://github.com/mitsuhiko/redis-rs/issues/183 -fn update_routes( - connection: RedisReconnect, +async fn update_routes( + mut connection: RedisReconnect, routing_table: Arc>>>, -) -> impl Future { +) -> Result<(), ()> { let mut pipe = redis_crate::pipe(); pipe.hgetall(ROUTES_KEY) .hgetall(STATIC_ROUTES_KEY) .get(DEFAULT_ROUTE_KEY); - pipe.query_async(connection) + 
let (routes, static_routes, default_route): (RouteVec, RouteVec, Option) = pipe + .query_async(&mut connection) .map_err(|err| error!("Error polling for routing table updates: {:?}", err)) - .and_then( - move |(_connection, (routes, static_routes, default_route)): ( - _, - (RouteVec, RouteVec, Option), - )| { - trace!( - "Loaded routes from redis. Static routes: {:?}, default route: {:?}, other routes: {:?}", - static_routes, - default_route, - routes - ); - // If there is a default route set in the db, - // set the entry for "" in the routing table to route to that account - let default_route_iter = iter::once(default_route) - .filter_map(|r| r) - .map(|rid| (String::new(), rid.0)); - let routes = HashMap::from_iter( - routes - .into_iter().map(|(s, rid)| (s, rid.0)) - // Include the default route if there is one - .chain(default_route_iter) - // Having the static_routes inserted after ensures that they will overwrite - // any routes with the same prefix from the first set - .chain(static_routes.into_iter().map(|(s, rid)| (s, rid.0))) - ); - // TODO we may not want to print this because the routing table will be very big - // if the node has a lot of local accounts - trace!("Routing table is: {:?}", routes); - *routing_table.write() = Arc::new(routes); - Ok(()) - }, - ) + .await?; + trace!( + "Loaded routes from redis. Static routes: {:?}, default route: {:?}, other routes: {:?}", + static_routes, + default_route, + routes + ); + // If there is a default route set in the db, + // set the entry for "" in the routing table to route to that account + let default_route_iter = iter::once(default_route) + .filter_map(|r| r) + .map(|rid| (String::new(), rid.0)); + let routes = HashMap::from_iter( + routes + .into_iter() + .map(|(s, rid)| (s, rid.0)) + // Include the default route if there is one + .chain(default_route_iter) + // Having the static_routes inserted after ensures that they will overwrite + // any routes with the same prefix from the first set + .chain(static_routes.into_iter().map(|(s, rid)| (s, rid.0))), + ); + // TODO we may not want to print this because the routing table will be very big + // if the node has a lot of local accounts + trace!("Routing table is: {:?}", routes); + *routing_table.write() = Arc::new(routes); + Ok(()) } // Uuid does not implement ToRedisArgs and FromRedisValue. @@ -2193,23 +2074,23 @@ impl FromRedisValue for AccountWithEncryptedTokens { "ilp_over_http_incoming_token", &hash, )? - .map(SecretBytes::from), + .map(SecretBytesMut::from), ilp_over_http_outgoing_token: get_bytes_option( "ilp_over_http_outgoing_token", &hash, )? - .map(SecretBytes::from), + .map(SecretBytesMut::from), ilp_over_btp_url: get_url_option("ilp_over_btp_url", &hash)?, ilp_over_btp_incoming_token: get_bytes_option( "ilp_over_btp_incoming_token", &hash, )? - .map(SecretBytes::from), + .map(SecretBytesMut::from), ilp_over_btp_outgoing_token: get_bytes_option( "ilp_over_btp_outgoing_token", &hash, )? 
- .map(SecretBytes::from), + .map(SecretBytesMut::from), max_packet_amount: get_value("max_packet_amount", &hash)?, min_balance: get_value_option("min_balance", &hash)?, settle_threshold: get_value_option("settle_threshold", &hash)?, @@ -2250,10 +2131,13 @@ where } } -fn get_bytes_option(key: &str, map: &HashMap) -> Result, RedisError> { +fn get_bytes_option( + key: &str, + map: &HashMap, +) -> Result, RedisError> { if let Some(ref value) = map.get(key) { let vec: Vec = from_redis_value(value)?; - Ok(Some(Bytes::from(vec))) + Ok(Some(BytesMut::from(vec.as_slice()))) } else { Ok(None) } @@ -2275,29 +2159,16 @@ fn get_url_option(key: &str, map: &HashMap) -> Result #[cfg(test)] mod tests { use super::*; - use futures::future; use redis_crate::IntoConnectionInfo; - use tokio::runtime::Runtime; - - #[test] - fn connect_fails_if_db_unavailable() { - let mut runtime = Runtime::new().unwrap(); - runtime - .block_on(future::lazy( - || -> Box + Send> { - Box::new( - RedisStoreBuilder::new( - "redis://127.0.0.1:0".into_connection_info().unwrap() as ConnectionInfo, - [0; 32], - ) - .connect() - .then(|result| { - assert!(result.is_err()); - Ok(()) - }), - ) - }, - )) - .unwrap(); + + #[tokio::test] + async fn connect_fails_if_db_unavailable() { + let result = RedisStoreBuilder::new( + "redis://127.0.0.1:0".into_connection_info().unwrap() as ConnectionInfo, + [0; 32], + ) + .connect() + .await; + assert!(result.is_err()); } } diff --git a/crates/interledger-store/src/redis/reconnect.rs b/crates/interledger-store/src/redis/reconnect.rs index ed9680927..735e0dca7 100644 --- a/crates/interledger-store/src/redis/reconnect.rs +++ b/crates/interledger-store/src/redis/reconnect.rs @@ -1,43 +1,53 @@ -use futures::{ - future::{err, result, Either}, - Future, -}; +use futures::future::{FutureExt, TryFutureExt}; use log::{debug, error}; use parking_lot::RwLock; use redis_crate::{ - aio::{ConnectionLike, SharedConnection}, - Client, ConnectionInfo, RedisError, Value, + aio::{ConnectionLike, MultiplexedConnection}, + Client, Cmd, ConnectionInfo, Pipeline, RedisError, RedisFuture, Value, }; use std::sync::Arc; -/// Wrapper around a Redis SharedConnection that automatically +type Result = std::result::Result; + +/// Wrapper around a Redis MultiplexedConnection that automatically /// attempts to reconnect to the DB if the connection is dropped #[derive(Clone)] pub struct RedisReconnect { pub(crate) redis_info: Arc, - pub(crate) conn: Arc>, + pub(crate) conn: Arc>, +} + +async fn get_shared_connection(redis_info: Arc) -> Result { + let client = Client::open((*redis_info).clone())?; + client + .get_multiplexed_tokio_connection() + .map_err(|e| { + error!("Error connecting to Redis: {:?}", e); + e + }) + .await } impl RedisReconnect { - pub fn connect( - redis_info: ConnectionInfo, - ) -> impl Future { + /// Connects to redis with the provided [`ConnectionInfo`](redis_crate::ConnectionInfo) + pub async fn connect(redis_info: ConnectionInfo) -> Result { let redis_info = Arc::new(redis_info); - get_shared_connection(redis_info.clone()).map(move |conn| RedisReconnect { + let conn = get_shared_connection(redis_info.clone()).await?; + Ok(RedisReconnect { conn: Arc::new(RwLock::new(conn)), redis_info, }) } - pub fn reconnect(self) -> impl Future { - get_shared_connection(self.redis_info.clone()).and_then(move |shared_connection| { - (*self.conn.write()) = shared_connection; - debug!("Reconnected to Redis"); - Ok(self) - }) + /// Reconnects to redis + pub async fn reconnect(self) -> Result { + let shared_connection = 
get_shared_connection(self.redis_info.clone()).await?; + (*self.conn.write()) = shared_connection; + debug!("Reconnected to Redis"); + Ok(self) } - fn get_shared_connection(&self) -> SharedConnection { + fn get_shared_connection(&self) -> MultiplexedConnection { self.conn.read().clone() } } @@ -47,56 +57,46 @@ impl ConnectionLike for RedisReconnect { self.conn.read().get_db() } - fn req_packed_command( - self, - cmd: Vec, - ) -> Box + Send> { - let clone = self.clone(); - Box::new( - self.get_shared_connection() - .req_packed_command(cmd) - .or_else(move |error| { + fn req_packed_command<'a>(&'a mut self, cmd: &'a Cmd) -> RedisFuture<'a, Value> { + // This is how it is implemented in the redis-rs repository + (async move { + let mut connection = self.get_shared_connection(); + match connection.req_packed_command(cmd).await { + Ok(res) => Ok(res), + Err(error) => { if error.is_connection_dropped() { debug!("Redis connection was dropped, attempting to reconnect"); - Either::A(clone.reconnect().then(|_| Err(error))) - } else { - Either::B(err(error)) + // TODO: Is this correct syntax? Otherwise we get an unused result warning + let _ = self.clone().reconnect().await; } - }) - .map(move |(_conn, res)| (self, res)), - ) + Err(error) + } + } + }) + .boxed() } - fn req_packed_commands( - self, - cmd: Vec, + fn req_packed_commands<'a>( + &'a mut self, + cmd: &'a Pipeline, offset: usize, count: usize, - ) -> Box), Error = RedisError> + Send> { - let clone = self.clone(); - Box::new( - self.get_shared_connection() - .req_packed_commands(cmd, offset, count) - .or_else(move |error| { + ) -> RedisFuture<'a, Vec> { + // This is how it is implemented in the redis-rs repository + (async move { + let mut connection = self.get_shared_connection(); + match connection.req_packed_commands(cmd, offset, count).await { + Ok(res) => Ok(res), + Err(error) => { if error.is_connection_dropped() { debug!("Redis connection was dropped, attempting to reconnect"); - Either::A(clone.reconnect().then(|_| Err(error))) - } else { - Either::B(err(error)) + // TODO: Is this correct syntax? 
Otherwise we get an unused result warning + let _ = self.clone().reconnect().await; } - }) - .map(|(_conn, res)| (self, res)), - ) - } -} - -fn get_shared_connection( - redis_info: Arc, -) -> impl Future { - result(Client::open((*redis_info).clone())).and_then(|client| { - client.get_shared_async_connection().map_err(|e| { - error!("Error connecting to Redis: {:?}", e); - e + Err(error) + } + } }) - }) + .boxed() + } } diff --git a/crates/interledger-store/tests/redis/accounts_test.rs b/crates/interledger-store/tests/redis/accounts_test.rs index 31e4d25e4..16dfcd572 100644 --- a/crates/interledger-store/tests/redis/accounts_test.rs +++ b/crates/interledger-store/tests/redis/accounts_test.rs @@ -1,489 +1,281 @@ use super::{fixtures::*, redis_helpers::*, store_helpers::*}; -use futures::future::{result, Either, Future}; +use futures::future::Either; +use futures::TryFutureExt; use interledger_api::{AccountSettings, NodeStore}; -use interledger_btp::{BtpAccount, BtpStore}; +use interledger_btp::BtpAccount; use interledger_ccp::{CcpRoutingAccount, RoutingRelation}; -use interledger_http::{HttpAccount, HttpStore}; +use interledger_http::HttpAccount; use interledger_packet::Address; use interledger_service::Account as AccountTrait; use interledger_service::{AccountStore, AddressStore, Username}; use interledger_service_util::BalanceStore; use interledger_store::redis::RedisStoreBuilder; -use log::{debug, error}; use redis_crate::Client; use secrecy::ExposeSecret; use secrecy::SecretString; +use std::default::Default; use std::str::FromStr; use uuid::Uuid; -#[test] -fn picks_up_parent_during_initialization() { +#[tokio::test] +async fn picks_up_parent_during_initialization() { let context = TestContext::new(); - block_on( - result(Client::open(context.get_client_connection_info())) - .map_err(|err| error!("Error creating Redis client: {:?}", err)) - .and_then(|client| { - debug!("Connected to redis: {:?}", client); - client - .get_shared_async_connection() - .map_err(|err| error!("Error connecting to Redis: {:?}", err)) - }) - .and_then(move |connection| { - // we set a parent that was already configured via perhaps a - // previous account insertion. that means that when we connect - // to the store we will always get the configured parent (if - // there was one)) - redis_crate::cmd("SET") - .arg("parent_node_account_address") - .arg("example.bob.node") - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, _): (_, redis_crate::Value)| { - RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) - .connect() - .and_then(move |store| { - // the store's ilp address is the store's - // username appended to the parent's address - assert_eq!( - store.get_ilp_address(), - Address::from_str("example.bob.node").unwrap() - ); - let _ = context; - Ok(()) - }) - }) - }), - ) - .unwrap(); + let client = Client::open(context.get_client_connection_info()).unwrap(); + let mut connection = client.get_multiplexed_tokio_connection().await.unwrap(); + + // we set a parent that was already configured via perhaps a + // previous account insertion. 
that means that when we connect + // to the store we will always get the configured parent (if + // there was one)) + let _: redis_crate::Value = redis_crate::cmd("SET") + .arg("parent_node_account_address") + .arg("example.bob.node") + .query_async(&mut connection) + .await + .unwrap(); + + let store = RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) + .connect() + .await + .unwrap(); + // the store's ilp address is the store's + // username appended to the parent's address + assert_eq!( + store.get_ilp_address(), + Address::from_str("example.bob.node").unwrap() + ); } -#[test] -fn insert_accounts() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .insert_account(ACCOUNT_DETAILS_2.clone()) - .and_then(move |account| { - assert_eq!( - *account.ilp_address(), - Address::from_str("example.alice.user1.charlie").unwrap() - ); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn insert_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let account = store + .insert_account(ACCOUNT_DETAILS_2.clone()) + .await + .unwrap(); + assert_eq!( + *account.ilp_address(), + Address::from_str("example.alice.user1.charlie").unwrap() + ); } -#[test] -fn update_ilp_and_children_addresses() { - block_on(test_store().and_then(|(store, context, accs)| { - store - // Add a NonRoutingAccount to make sure its address - // gets updated as well - .insert_account(ACCOUNT_DETAILS_2.clone()) - .and_then(move |acc2| { - let mut accs = accs.clone(); - accs.push(acc2); - accs.sort_by_key(|a| a.username().clone()); - let ilp_address = Address::from_str("test.parent.our_address").unwrap(); - store - .set_ilp_address(ilp_address.clone()) - .and_then(move |_| { - let ret = store.get_ilp_address(); - assert_eq!(ilp_address, ret); - store.get_all_accounts().and_then(move |accounts: Vec<_>| { - let mut accounts = accounts.clone(); - accounts.sort_by(|a, b| { - a.username() - .as_bytes() - .partial_cmp(b.username().as_bytes()) - .unwrap() - }); - for (a, b) in accounts.into_iter().zip(&accs) { - if a.routing_relation() == RoutingRelation::Child - || a.routing_relation() == RoutingRelation::NonRoutingAccount - { - assert_eq!( - *a.ilp_address(), - ilp_address.with_suffix(a.username().as_bytes()).unwrap() - ); - } else { - assert_eq!(a.ilp_address(), b.ilp_address()); - } - } - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn update_ilp_and_children_addresses() { + let (store, _context, accs) = test_store().await.unwrap(); + // Add a NonRoutingAccount to make sure its address + // gets updated as well + let acc2 = store + .insert_account(ACCOUNT_DETAILS_2.clone()) + .await + .unwrap(); + let mut accs = accs.clone(); + accs.push(acc2); + accs.sort_by_key(|a| a.username().clone()); + let ilp_address = Address::from_str("test.parent.our_address").unwrap(); + + store.set_ilp_address(ilp_address.clone()).await.unwrap(); + let ret = store.get_ilp_address(); + assert_eq!(ilp_address, ret); + + let mut accounts = store.get_all_accounts().await.unwrap(); + accounts.sort_by(|a, b| { + a.username() + .as_bytes() + .partial_cmp(b.username().as_bytes()) + .unwrap() + }); + for (a, b) in accounts.into_iter().zip(&accs) { + if a.routing_relation() == RoutingRelation::Child + || a.routing_relation() == RoutingRelation::NonRoutingAccount + { + assert_eq!( + *a.ilp_address(), + ilp_address.with_suffix(a.username().as_bytes()).unwrap() + ); + } else { + assert_eq!(a.ilp_address(), b.ilp_address()); + } + } } -#[test] -fn 
only_one_parent_allowed() { +#[tokio::test] +async fn only_one_parent_allowed() { let mut acc = ACCOUNT_DETAILS_2.clone(); acc.routing_relation = Some("Parent".to_owned()); acc.username = Username::from_str("another_name").unwrap(); acc.ilp_address = Some(Address::from_str("example.another_name").unwrap()); - block_on(test_store().and_then(|(store, context, accs)| { - store.insert_account(acc.clone()).then(move |res| { - // This should fail - assert!(res.is_err()); - futures::future::join_all(vec![ - Either::A(store.delete_account(accs[0].id()).and_then(|_| Ok(()))), - // must also clear the ILP Address to indicate that we no longer - // have a parent account configured - Either::B(store.clear_ilp_address()), - ]) - .and_then(move |_| { - store.insert_account(acc).and_then(move |_| { - // the call was successful, so the parent was succesfully added - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, accs) = test_store().await.unwrap(); + let res = store.insert_account(acc.clone()).await; + // This should fail + assert!(res.is_err()); + futures::future::join_all(vec![ + Either::Left(store.delete_account(accs[0].id()).map_ok(|_| ())), + // must also clear the ILP Address to indicate that we no longer + // have a parent account configured + Either::Right(store.clear_ilp_address()), + ]) + .await; + let res = store.insert_account(acc).await; + assert!(res.is_ok()); } -#[test] -fn delete_accounts() { - block_on(test_store().and_then(|(store, context, _accs)| { - store.get_all_accounts().and_then(move |accounts| { - let id = accounts[0].id(); - store.delete_account(id).and_then(move |_| { - store.get_all_accounts().and_then(move |accounts| { - for a in accounts { - assert_ne!(id, a.id()); - } - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn delete_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store.get_all_accounts().await.unwrap(); + let id = accounts[0].id(); + store.delete_account(id).await.unwrap(); + let accounts = store.get_all_accounts().await.unwrap(); + for a in accounts { + assert_ne!(id, a.id()); + } } -#[test] -fn update_accounts() { - block_on(test_store().and_then(|(store, context, accounts)| { - context - .async_connection() - .map_err(|err| panic!(err)) - .and_then(move |connection| { - let id = accounts[0].id(); - redis_crate::cmd("HMSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(600) - .arg("prepaid_amount") - .arg(400) - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, _): (_, redis_crate::Value)| { - let mut new = ACCOUNT_DETAILS_0.clone(); - new.asset_code = String::from("TUV"); - store.update_account(id, new).and_then(move |account| { - assert_eq!(account.asset_code(), "TUV"); - store.get_balance(account).and_then(move |balance| { - assert_eq!(balance, 1000); - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn update_accounts() { + let (store, context, accounts) = test_store().await.unwrap(); + let mut connection = context.async_connection().await.unwrap(); + let id = accounts[0].id(); + let _: redis_crate::Value = redis_crate::cmd("HMSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(600u64) + .arg("prepaid_amount") + .arg(400u64) + .query_async(&mut connection) + .await + .unwrap(); + let mut new = ACCOUNT_DETAILS_0.clone(); + new.asset_code = String::from("TUV"); + let account = store.update_account(id, new).await.unwrap(); + 
assert_eq!(account.asset_code(), "TUV"); + let balance = store.get_balance(account).await.unwrap(); + assert_eq!(balance, 1000); } -#[test] -fn modify_account_settings_settle_to_overflow() { - block_on(test_store().and_then(|(store, context, accounts)| { - let mut settings = AccountSettings::default(); - // Redis.rs cannot save a value larger than i64::MAX - settings.settle_to = Some(std::i64::MAX as u64 + 1); - let account = accounts[0].clone(); - let id = account.id(); - store - .modify_account_settings(id, settings) - .then(move |ret| { - assert!(ret.is_err()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn modify_account_settings_settle_to_overflow() { + let (store, _context, accounts) = test_store().await.unwrap(); + let mut settings = AccountSettings::default(); + // Redis.rs cannot save a value larger than i64::MAX + settings.settle_to = Some(std::i64::MAX as u64 + 1); + let account = accounts[0].clone(); + let id = account.id(); + let ret = store.modify_account_settings(id, settings).await; + assert!(ret.is_err()); } -use std::default::Default; -#[test] -fn modify_account_settings_unchanged() { - block_on(test_store().and_then(|(store, context, accounts)| { - let settings = AccountSettings::default(); - let account = accounts[0].clone(); +#[tokio::test] +async fn modify_account_settings_unchanged() { + let (store, _context, accounts) = test_store().await.unwrap(); + let settings = AccountSettings::default(); + let account = accounts[0].clone(); - let id = account.id(); - store - .modify_account_settings(id, settings) - .and_then(move |ret| { - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - ret.get_http_auth_token().unwrap().expose_secret(), - ); - assert_eq!( - account.get_ilp_over_btp_outgoing_token().unwrap(), - ret.get_ilp_over_btp_outgoing_token().unwrap() - ); - // Cannot check other parameters since they are only pub(crate). - let _ = context; - Ok(()) - }) - })) - .unwrap(); -} + let id = account.id(); + let ret = store.modify_account_settings(id, settings).await.unwrap(); -#[test] -fn modify_account_settings() { - block_on(test_store().and_then(|(store, context, accounts)| { - let settings = AccountSettings { - ilp_over_http_outgoing_token: Some(SecretString::new("test_token".to_owned())), - ilp_over_http_incoming_token: Some(SecretString::new("http_in_new".to_owned())), - ilp_over_btp_outgoing_token: Some(SecretString::new("dylan:test".to_owned())), - ilp_over_btp_incoming_token: Some(SecretString::new("btp_in_new".to_owned())), - ilp_over_http_url: Some("http://example.com/accounts/dylan/ilp".to_owned()), - ilp_over_btp_url: Some("http://example.com/accounts/dylan/ilp/btp".to_owned()), - settle_threshold: Some(-50), - settle_to: Some(100), - }; - let account = accounts[0].clone(); - - let id = account.id(); - store - .modify_account_settings(id, settings) - .and_then(move |ret| { - assert_eq!( - ret.get_http_auth_token().unwrap().expose_secret(), - "test_token", - ); - assert_eq!( - ret.get_ilp_over_btp_outgoing_token().unwrap(), - &b"dylan:test"[..], - ); - // Cannot check other parameters since they are only pub(crate). 
- let _ = context; - Ok(()) - }) - })) - .unwrap(); -} - -#[test] -fn starts_with_zero_balance() { - block_on(test_store().and_then(|(store, context, accs)| { - let account0 = accs[0].clone(); - store.get_balance(account0).and_then(move |balance| { - assert_eq!(balance, 0); - let _ = context; - Ok(()) - }) - })) - .unwrap(); + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + ret.get_http_auth_token().unwrap().expose_secret(), + ); + assert_eq!( + account.get_ilp_over_btp_outgoing_token().unwrap(), + ret.get_ilp_over_btp_outgoing_token().unwrap() + ); } -#[test] -fn fetches_account_from_username() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .get_account_id_from_username(&Username::from_str("alice").unwrap()) - .and_then(move |account_id| { - assert_eq!(account_id, accs[0].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); -} - -#[test] -fn duplicate_http_incoming_auth_works() { - let mut duplicate = ACCOUNT_DETAILS_2.clone(); - duplicate.ilp_over_http_incoming_token = - Some(SecretString::new("incoming_auth_token".to_string())); - block_on(test_store().and_then(|(store, context, accs)| { - let original = accs[0].clone(); - let original_id = original.id(); - store.insert_account(duplicate).and_then(move |duplicate| { - let duplicate_id = duplicate.id(); - assert_ne!(original_id, duplicate_id); - futures::future::join_all(vec![ - store.get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ), - store.get_account_from_http_auth( - &Username::from_str("charlie").unwrap(), - "incoming_auth_token", - ), - ]) - .and_then(move |accs| { - // Alice and Charlie had the same auth token, but they had a - // different username/account id, so no problem. - assert_ne!(accs[0].id(), accs[1].id()); - assert_eq!(accs[0].id(), original_id); - assert_eq!(accs[1].id(), duplicate_id); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap(); -} +#[tokio::test] +async fn modify_account_settings() { + let (store, _context, accounts) = test_store().await.unwrap(); + let settings = AccountSettings { + ilp_over_http_outgoing_token: Some(SecretString::new("test_token".to_owned())), + ilp_over_http_incoming_token: Some(SecretString::new("http_in_new".to_owned())), + ilp_over_btp_outgoing_token: Some(SecretString::new("dylan:test".to_owned())), + ilp_over_btp_incoming_token: Some(SecretString::new("btp_in_new".to_owned())), + ilp_over_http_url: Some("http://example.com/accounts/dylan/ilp".to_owned()), + ilp_over_btp_url: Some("http://example.com/accounts/dylan/ilp/btp".to_owned()), + settle_threshold: Some(-50), + settle_to: Some(100), + }; + let account = accounts[0].clone(); -#[test] -fn gets_account_from_btp_auth() { - block_on(test_store().and_then(|(store, context, accs)| { - // alice's incoming btp token is the username/password to get her - // account's information - store - .get_account_from_btp_auth(&Username::from_str("alice").unwrap(), "btp_token") - .and_then(move |acc| { - assert_eq!(acc.id(), accs[0].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); + let id = account.id(); + let ret = store.modify_account_settings(id, settings).await.unwrap(); + assert_eq!( + ret.get_http_auth_token().unwrap().expose_secret(), + "test_token", + ); + assert_eq!( + ret.get_ilp_over_btp_outgoing_token().unwrap(), + &b"dylan:test"[..], + ); } -#[test] -fn gets_account_from_http_auth() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .get_account_from_http_auth( - 
&Username::from_str("alice").unwrap(), - "incoming_auth_token", - ) - .and_then(move |acc| { - assert_eq!(acc.id(), accs[0].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn starts_with_zero_balance() { + let (store, _context, accs) = test_store().await.unwrap(); + let account0 = accs[0].clone(); + let balance = store.get_balance(account0).await.unwrap(); + assert_eq!(balance, 0); } -#[test] -fn duplicate_btp_incoming_auth_works() { - let mut charlie = ACCOUNT_DETAILS_2.clone(); - charlie.ilp_over_btp_incoming_token = Some(SecretString::new("btp_token".to_string())); - block_on(test_store().and_then(|(store, context, accs)| { - let alice = accs[0].clone(); - let alice_id = alice.id(); - store.insert_account(charlie).and_then(move |charlie| { - let charlie_id = charlie.id(); - assert_ne!(alice_id, charlie_id); - futures::future::join_all(vec![ - store.get_account_from_btp_auth(&Username::from_str("alice").unwrap(), "btp_token"), - store.get_account_from_btp_auth( - &Username::from_str("charlie").unwrap(), - "btp_token", - ), - ]) - .and_then(move |accs| { - assert_ne!(accs[0].id(), accs[1].id()); - assert_eq!(accs[0].id(), alice_id); - assert_eq!(accs[1].id(), charlie_id); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn fetches_account_from_username() { + let (store, _context, accs) = test_store().await.unwrap(); + let account_id = store + .get_account_id_from_username(&Username::from_str("alice").unwrap()) + .await + .unwrap(); + assert_eq!(account_id, accs[0].id()); } -#[test] -fn get_all_accounts() { - block_on(test_store().and_then(|(store, context, _accs)| { - store.get_all_accounts().and_then(move |accounts| { - assert_eq!(accounts.len(), 2); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn get_all_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store.get_all_accounts().await.unwrap(); + assert_eq!(accounts.len(), 2); } -#[test] -fn gets_single_account() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone = store.clone(); - let acc = accs[0].clone(); - store_clone - .get_accounts(vec![acc.id()]) - .and_then(move |accounts| { - assert_eq!(accounts[0].ilp_address(), acc.ilp_address()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn gets_single_account() { + let (store, _context, accs) = test_store().await.unwrap(); + let acc = accs[0].clone(); + let accounts = store.get_accounts(vec![acc.id()]).await.unwrap(); + assert_eq!(accounts[0].ilp_address(), acc.ilp_address()); } -#[test] -fn gets_multiple() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone = store.clone(); - // set account ids in reverse order - let account_ids: Vec = accs.iter().rev().map(|a| a.id()).collect::<_>(); - store_clone - .get_accounts(account_ids) - .and_then(move |accounts| { - // note reverse order is intentional - assert_eq!(accounts[0].ilp_address(), accs[1].ilp_address()); - assert_eq!(accounts[1].ilp_address(), accs[0].ilp_address()); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn gets_multiple() { + let (store, _context, accs) = test_store().await.unwrap(); + // set account ids in reverse order + let account_ids: Vec = accs.iter().rev().map(|a| a.id()).collect::<_>(); + let accounts = store.get_accounts(account_ids).await.unwrap(); + // note reverse order is intentional + assert_eq!(accounts[0].ilp_address(), accs[1].ilp_address()); + 
assert_eq!(accounts[1].ilp_address(), accs[0].ilp_address()); } -#[test] -fn decrypts_outgoing_tokens_acc() { - block_on(test_store().and_then(|(store, context, accs)| { - let acc = accs[0].clone(); - store - .get_accounts(vec![acc.id()]) - .and_then(move |accounts| { - let account = accounts[0].clone(); - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - acc.get_http_auth_token().unwrap().expose_secret(), - ); - assert_eq!( - account.get_ilp_over_btp_outgoing_token().unwrap(), - acc.get_ilp_over_btp_outgoing_token().unwrap(), - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn decrypts_outgoing_tokens_acc() { + let (store, _context, accs) = test_store().await.unwrap(); + let acc = accs[0].clone(); + let accounts = store.get_accounts(vec![acc.id()]).await.unwrap(); + let account = accounts[0].clone(); + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + acc.get_http_auth_token().unwrap().expose_secret(), + ); + assert_eq!( + account.get_ilp_over_btp_outgoing_token().unwrap(), + acc.get_ilp_over_btp_outgoing_token().unwrap(), + ); } -#[test] -fn errors_for_unknown_accounts() { - let result = block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_accounts(vec![Uuid::new_v4(), Uuid::new_v4()]) - .then(move |result| { - let _ = context; - result - }) - })); +#[tokio::test] +async fn errors_for_unknown_accounts() { + let (store, _context, _) = test_store().await.unwrap(); + let result = store + .get_accounts(vec![Uuid::new_v4(), Uuid::new_v4()]) + .await; assert!(result.is_err()); } diff --git a/crates/interledger-store/tests/redis/balances_test.rs b/crates/interledger-store/tests/redis/balances_test.rs index 20f3fece5..88d18f1c4 100644 --- a/crates/interledger-store/tests/redis/balances_test.rs +++ b/crates/interledger-store/tests/redis/balances_test.rs @@ -1,5 +1,5 @@ use super::{fixtures::*, store_helpers::*}; -use futures::future::{self, Either, Future}; + use interledger_api::NodeStore; use interledger_packet::Address; use interledger_service::{Account as AccountTrait, AddressStore}; @@ -9,90 +9,64 @@ use interledger_store::account::Account; use std::str::FromStr; use uuid::Uuid; -#[test] -fn get_balance() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account_id = Uuid::new_v4(); - context - .async_connection() - .map_err(move |err| panic!(err)) - .and_then(move |connection| { - redis_crate::cmd("HMSET") - .arg(format!("accounts:{}", account_id)) - .arg("balance") - .arg(600) - .arg("prepaid_amount") - .arg(400) - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, _): (_, redis_crate::Value)| { - let account = Account::try_from( - account_id, - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) - .unwrap(); - store.get_balance(account).and_then(move |balance| { - assert_eq!(balance, 1000); - let _ = context; - Ok(()) - }) - }) - }) - })) +#[tokio::test] +async fn get_balance() { + let (store, context, _accs) = test_store().await.unwrap(); + let account_id = Uuid::new_v4(); + let mut connection = context.async_connection().await.unwrap(); + let _: redis_crate::Value = redis_crate::cmd("HMSET") + .arg(format!("accounts:{}", account_id)) + .arg("balance") + .arg(600u64) + .arg("prepaid_amount") + .arg(400u64) + .query_async(&mut connection) + .await + .unwrap(); + let account = Account::try_from( + account_id, + ACCOUNT_DETAILS_0.clone(), + store.get_ilp_address(), + ) .unwrap(); + let balance = store.get_balance(account).await.unwrap(); + 
assert_eq!(balance, 1000); } -#[test] -fn prepare_then_fulfill_with_settlement() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone_1 = store.clone(); - let store_clone_2 = store.clone(); - store - .clone() - .get_accounts(vec![accs[0].id(), accs[1].id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - let account0 = accounts[0].clone(); - let account1 = accounts[1].clone(); - store - // reduce account 0's balance by 100 - .update_balances_for_prepare(accounts[0].clone(), 100) - .and_then(move |_| { - store_clone_1 - .clone() - .get_balance(accounts[0].clone()) - .join(store_clone_1.clone().get_balance(accounts[1].clone())) - .and_then(|(balance0, balance1)| { - assert_eq!(balance0, -100); - assert_eq!(balance1, 0); - Ok(()) - }) - }) - .and_then(move |_| { - store_clone_2 - .clone() - .update_balances_for_fulfill(account1.clone(), 100) - .and_then(move |_| { - store_clone_2 - .clone() - .get_balance(account0.clone()) - .join(store_clone_2.clone().get_balance(account1.clone())) - .and_then(move |(balance0, balance1)| { - assert_eq!(balance0, -100); - assert_eq!(balance1, -1000); // the account must be settled down to -1000 - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn prepare_then_fulfill_with_settlement() { + let (store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), accs[1].id()]) + .await + .unwrap(); + let account0 = accounts[0].clone(); + let account1 = accounts[1].clone(); + // reduce account 0's balance by 100 + store + .update_balances_for_prepare(account0.clone(), 100) + .await + .unwrap(); + // TODO:Can we make get_balance take a reference to the account? + // Even better, we should make it just take the account uid/username! + let balance0 = store.get_balance(account0.clone()).await.unwrap(); + let balance1 = store.get_balance(account1.clone()).await.unwrap(); + assert_eq!(balance0, -100); + assert_eq!(balance1, 0); + + // Account 1 hits the settlement limit (?) 
TODO + store + .update_balances_for_fulfill(account1.clone(), 100) + .await + .unwrap(); + let balance0 = store.get_balance(account0).await.unwrap(); + let balance1 = store.get_balance(account1).await.unwrap(); + assert_eq!(balance0, -100); + assert_eq!(balance1, -1000); } -#[test] -fn process_fulfill_no_settle_to() { +#[tokio::test] +async fn process_fulfill_no_settle_to() { // account without a settle_to let acc = { let mut acc = ACCOUNT_DETAILS_1.clone(); @@ -104,31 +78,21 @@ fn process_fulfill_no_settle_to() { acc.settle_to = None; acc }; - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - store.clone().insert_account(acc).and_then(move |account| { - let id = account.id(); - store_clone - .get_accounts(vec![id]) - .and_then(move |accounts| { - let acc = accounts[0].clone(); - store_clone - .clone() - .update_balances_for_fulfill(acc.clone(), 100) - .and_then(move |(balance, amount_to_settle)| { - assert_eq!(balance, 100); - assert_eq!(amount_to_settle, 0); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, _accs) = test_store().await.unwrap(); + let account = store.insert_account(acc).await.unwrap(); + let id = account.id(); + let accounts = store.get_accounts(vec![id]).await.unwrap(); + let acc = accounts[0].clone(); + let (balance, amount_to_settle) = store + .update_balances_for_fulfill(acc.clone(), 100) + .await + .unwrap(); + assert_eq!(balance, 100); + assert_eq!(amount_to_settle, 0); } -#[test] -fn process_fulfill_settle_to_over_threshold() { +#[tokio::test] +async fn process_fulfill_settle_to_over_threshold() { // account misconfigured with settle_to >= settle_threshold does not get settlements let acc = { let mut acc = ACCOUNT_DETAILS_1.clone(); @@ -141,31 +105,21 @@ fn process_fulfill_settle_to_over_threshold() { acc.ilp_over_btp_incoming_token = None; acc }; - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - store.clone().insert_account(acc).and_then(move |acc| { - let id = acc.id(); - store_clone - .get_accounts(vec![id]) - .and_then(move |accounts| { - let acc = accounts[0].clone(); - store_clone - .clone() - .update_balances_for_fulfill(acc.clone(), 1000) - .and_then(move |(balance, amount_to_settle)| { - assert_eq!(balance, 1000); - assert_eq!(amount_to_settle, 0); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, _accs) = test_store().await.unwrap(); + let acc = store.insert_account(acc).await.unwrap(); + let id = acc.id(); + let accounts = store.get_accounts(vec![id]).await.unwrap(); + let acc = accounts[0].clone(); + let (balance, amount_to_settle) = store + .update_balances_for_fulfill(acc.clone(), 1000) + .await + .unwrap(); + assert_eq!(balance, 1000); + assert_eq!(amount_to_settle, 0); } -#[test] -fn process_fulfill_ok() { +#[tokio::test] +async fn process_fulfill_ok() { // account with settle to = 0 (not falsy) with settle_threshold > 0, gets settlements let acc = { let mut acc = ACCOUNT_DETAILS_1.clone(); @@ -178,160 +132,99 @@ fn process_fulfill_ok() { acc.ilp_over_btp_incoming_token = None; acc }; - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - store.clone().insert_account(acc).and_then(move |account| { - let id = account.id(); - store_clone - .get_accounts(vec![id]) - .and_then(move |accounts| { - let acc = accounts[0].clone(); - store_clone - .clone() - .update_balances_for_fulfill(acc.clone(), 101) - .and_then(move |(balance, amount_to_settle)| { 
- assert_eq!(balance, 0); - assert_eq!(amount_to_settle, 101); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let (store, _context, _accs) = test_store().await.unwrap(); + let account = store.insert_account(acc).await.unwrap(); + let id = account.id(); + let accounts = store.get_accounts(vec![id]).await.unwrap(); + let acc = accounts[0].clone(); + let (balance, amount_to_settle) = store + .update_balances_for_fulfill(acc.clone(), 101) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(amount_to_settle, 101); } -#[test] -fn prepare_then_reject() { - block_on(test_store().and_then(|(store, context, accs)| { - let store_clone_1 = store.clone(); - let store_clone_2 = store.clone(); - store - .clone() - .get_accounts(vec![accs[0].id(), accs[1].id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - let account0 = accounts[0].clone(); - let account1 = accounts[1].clone(); - store - .update_balances_for_prepare(accounts[0].clone(), 100) - .and_then(move |_| { - store_clone_1 - .clone() - .get_balance(accounts[0].clone()) - .join(store_clone_1.clone().get_balance(accounts[1].clone())) - .and_then(|(balance0, balance1)| { - assert_eq!(balance0, -100); - assert_eq!(balance1, 0); - Ok(()) - }) - }) - .and_then(move |_| { - store_clone_2 - .clone() - .update_balances_for_reject(account0.clone(), 100) - .and_then(move |_| { - store_clone_2 - .clone() - .get_balance(account0.clone()) - .join(store_clone_2.clone().get_balance(account1.clone())) - .and_then(move |(balance0, balance1)| { - assert_eq!(balance0, 0); - assert_eq!(balance1, 0); - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +#[tokio::test] +async fn prepare_then_reject() { + let (store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), accs[1].id()]) + .await + .unwrap(); + let account0 = accounts[0].clone(); + let _account1 = accounts[1].clone(); + store + .update_balances_for_prepare(accounts[0].clone(), 100) + .await + .unwrap(); + let balance0 = store.get_balance(accounts[0].clone()).await.unwrap(); + let balance1 = store.get_balance(accounts[1].clone()).await.unwrap(); + assert_eq!(balance0, -100); + assert_eq!(balance1, 0); + store + .update_balances_for_reject(account0.clone(), 100) + .await + .unwrap(); + let balance0 = store.get_balance(accounts[0].clone()).await.unwrap(); + let balance1 = store.get_balance(accounts[1].clone()).await.unwrap(); + assert_eq!(balance0, 0); + assert_eq!(balance1, 0); } -#[test] -fn enforces_minimum_balance() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .clone() - .get_accounts(vec![accs[0].id(), accs[1].id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - store - .update_balances_for_prepare(accounts[0].clone(), 10000) - .then(move |result| { - assert!(result.is_err()); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn enforces_minimum_balance() { + let (store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), accs[1].id()]) + .await + .unwrap(); + let result = store + .update_balances_for_prepare(accounts[0].clone(), 10000) + .await; + assert!(result.is_err()); } -#[test] +#[tokio::test] // Prepare and Fulfill a packet for 100 units from Account 0 to Account 1 // Then, Prepare and Fulfill a packet for 80 units from Account 1 to Account 0 -fn netting_fulfilled_balances() { - block_on(test_store().and_then(|(store, 
context, accs)| { - let store_clone1 = store.clone(); - let store_clone2 = store.clone(); - store - .clone() - .insert_account(ACCOUNT_DETAILS_2.clone()) - .and_then(move |acc| { - store - .clone() - .get_accounts(vec![accs[0].id(), acc.id()]) - .map_err(|_err| panic!("Unable to get accounts")) - .and_then(move |accounts| { - let account0 = accounts[0].clone(); - let account1 = accounts[1].clone(); - let account0_clone = account0.clone(); - let account1_clone = account1.clone(); - future::join_all(vec![ - Either::A(store.clone().update_balances_for_prepare( - account0.clone(), - 100, // decrement account 0 by 100 - )), - Either::B( - store - .clone() - .update_balances_for_fulfill( - account1.clone(), // increment account 1 by 100 - 100, - ) - .and_then(|_| Ok(())), - ), - ]) - .and_then(move |_| { - future::join_all(vec![ - Either::A( - store_clone1 - .clone() - .update_balances_for_prepare(account1.clone(), 80), - ), - Either::B( - store_clone1 - .clone() - .update_balances_for_fulfill(account0.clone(), 80) - .and_then(|_| Ok(())), - ), - ]) - }) - .and_then(move |_| { - store_clone2 - .clone() - .get_balance(account0_clone) - .join(store_clone2.get_balance(account1_clone)) - .and_then(move |(balance0, balance1)| { - assert_eq!(balance0, -20); - assert_eq!(balance1, 20); - let _ = context; - Ok(()) - }) - }) - }) - }) - })) - .unwrap(); +async fn netting_fulfilled_balances() { + let (store, _context, accs) = test_store().await.unwrap(); + let acc = store + .insert_account(ACCOUNT_DETAILS_2.clone()) + .await + .unwrap(); + let accounts = store + .get_accounts(vec![accs[0].id(), acc.id()]) + .await + .unwrap(); + let account0 = accounts[0].clone(); + let account1 = accounts[1].clone(); + + // decrement account 0 by 100 + store + .update_balances_for_prepare(account0.clone(), 100) + .await + .unwrap(); + // increment account 1 by 100 + store + .update_balances_for_fulfill(account1.clone(), 100) + .await + .unwrap(); + + // decrement account 1 by 80 + store + .update_balances_for_prepare(account1.clone(), 80) + .await + .unwrap(); + // increment account 0 by 80 + store + .update_balances_for_fulfill(account0.clone(), 80) + .await + .unwrap(); + + let balance0 = store.get_balance(accounts[0].clone()).await.unwrap(); + let balance1 = store.get_balance(accounts[1].clone()).await.unwrap(); + assert_eq!(balance0, -20); + assert_eq!(balance1, 20); } diff --git a/crates/interledger-store/tests/redis/btp_test.rs b/crates/interledger-store/tests/redis/btp_test.rs index 94af16200..72c0657cc 100644 --- a/crates/interledger-store/tests/redis/btp_test.rs +++ b/crates/interledger-store/tests/redis/btp_test.rs @@ -1,63 +1,79 @@ +use super::fixtures::*; + use super::store_helpers::*; -use futures::future::Future; + +use interledger_api::NodeStore; use interledger_btp::{BtpAccount, BtpStore}; use interledger_http::HttpAccount; use interledger_packet::Address; -use interledger_service::{Account, Username}; -use secrecy::ExposeSecret; +use interledger_service::{Account as AccountTrait, Username}; + +use secrecy::{ExposeSecret, SecretString}; use std::str::FromStr; -#[test] -fn gets_account_from_btp_auth() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") - .and_then(move |account| { - assert_eq!( - *account.ilp_address(), - Address::from_str("example.alice.user1.bob").unwrap() - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_account_from_btp_auth() { + let (store, 
_context, _) = test_store().await.unwrap(); + let account = store + .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") + .await + .unwrap(); + assert_eq!( + *account.ilp_address(), + Address::from_str("example.alice.user1.bob").unwrap() + ); } -#[test] -fn decrypts_outgoing_tokens_btp() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") - .and_then(move |account| { - // the account is created on Dylan's connector - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - "outgoing_auth_token", - ); - assert_eq!( - &account.get_ilp_over_btp_outgoing_token().unwrap(), - b"btp_token" - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn decrypts_outgoing_tokens_btp() { + let (store, _context, _) = test_store().await.unwrap(); + let account = store + .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "other_btp_token") + .await + .unwrap(); + + // the account is created on Dylan's connector + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + "outgoing_auth_token", + ); + assert_eq!( + &account.get_ilp_over_btp_outgoing_token().unwrap(), + b"btp_token" + ); } -#[test] -fn errors_on_unknown_btp_token() { - let result = block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_btp_auth( - &Username::from_str("someuser").unwrap(), - "unknown_btp_token", - ) - .then(move |result| { - let _ = context; - result - }) - })); +#[tokio::test] +async fn errors_on_unknown_user_or_wrong_btp_token() { + let (store, _context, _) = test_store().await.unwrap(); + let result = store + .get_account_from_btp_auth(&Username::from_str("asdf").unwrap(), "other_btp_token") + .await; assert!(result.is_err()); + + let result = store + .get_account_from_btp_auth(&Username::from_str("bob").unwrap(), "wrong_token") + .await; + assert!(result.is_err()); +} + +#[tokio::test] +async fn duplicate_btp_incoming_auth_works() { + let mut charlie = ACCOUNT_DETAILS_2.clone(); + charlie.ilp_over_btp_incoming_token = Some(SecretString::new("btp_token".to_string())); + let (store, _context, accs) = test_store().await.unwrap(); + let alice = accs[0].clone(); + let alice_id = alice.id(); + let charlie = store.insert_account(charlie).await.unwrap(); + let charlie_id = charlie.id(); + assert_ne!(alice_id, charlie_id); + let result = futures::future::join_all(vec![ + store.get_account_from_btp_auth(&Username::from_str("alice").unwrap(), "btp_token"), + store.get_account_from_btp_auth(&Username::from_str("charlie").unwrap(), "btp_token"), + ]) + .await; + let accs: Vec<_> = result.into_iter().map(|r| r.unwrap()).collect(); + assert_ne!(accs[0].id(), accs[1].id()); + assert_eq!(accs[0].id(), alice_id); + assert_eq!(accs[1].id(), charlie_id); } diff --git a/crates/interledger-store/tests/redis/http_test.rs b/crates/interledger-store/tests/redis/http_test.rs index 3e3c2350f..a560f0670 100644 --- a/crates/interledger-store/tests/redis/http_test.rs +++ b/crates/interledger-store/tests/redis/http_test.rs @@ -1,74 +1,77 @@ +use super::fixtures::*; use super::store_helpers::*; -use futures::future::Future; + +use interledger_api::NodeStore; use interledger_btp::BtpAccount; use interledger_http::{HttpAccount, HttpStore}; use interledger_packet::Address; use interledger_service::{Account, Username}; -use secrecy::ExposeSecret; +use secrecy::{ExposeSecret, SecretString}; use std::str::FromStr; -#[test] -fn 
gets_account_from_http_bearer_token() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ) - .and_then(move |account| { - assert_eq!( - *account.ilp_address(), - Address::from_str("example.alice").unwrap() - ); - // this account is in Dylan's connector - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - "outgoing_auth_token", - ); - assert_eq!( - &account.get_ilp_over_btp_outgoing_token().unwrap(), - b"btp_token", - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_account_from_http_bearer_token() { + let (store, _context, _) = test_store().await.unwrap(); + let account = store + .get_account_from_http_auth(&Username::from_str("alice").unwrap(), "incoming_auth_token") + .await + .unwrap(); + assert_eq!( + *account.ilp_address(), + Address::from_str("example.alice").unwrap() + ); + assert_eq!( + account.get_http_auth_token().unwrap().expose_secret(), + "outgoing_auth_token", + ); + assert_eq!( + &account.get_ilp_over_btp_outgoing_token().unwrap(), + b"btp_token", + ); } -#[test] -fn decrypts_outgoing_tokens_http() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_http_auth( - &Username::from_str("alice").unwrap(), - "incoming_auth_token", - ) - .and_then(move |account| { - assert_eq!( - account.get_http_auth_token().unwrap().expose_secret(), - "outgoing_auth_token", - ); - assert_eq!( - &account.get_ilp_over_btp_outgoing_token().unwrap(), - b"btp_token", - ); - let _ = context; - Ok(()) - }) - })) - .unwrap() -} +#[tokio::test] +async fn errors_on_unknown_user_or_wrong_http_token() { + let (store, _context, _) = test_store().await.unwrap(); + // wrong password + let result = store + .get_account_from_http_auth(&Username::from_str("alice").unwrap(), "unknown_token") + .await; + assert!(result.is_err()); -#[test] -fn errors_on_unknown_http_auth() { - let result = block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_account_from_http_auth(&Username::from_str("someuser").unwrap(), "unknown_token") - .then(move |result| { - let _ = context; - result - }) - })); + // wrong user + let result = store + .get_account_from_http_auth(&Username::from_str("asdf").unwrap(), "incoming_auth_token") + .await; assert!(result.is_err()); } + +#[tokio::test] +async fn duplicate_http_incoming_auth_works() { + let mut duplicate = ACCOUNT_DETAILS_2.clone(); + duplicate.ilp_over_http_incoming_token = + Some(SecretString::new("incoming_auth_token".to_string())); + let (store, _context, accs) = test_store().await.unwrap(); + let original = accs[0].clone(); + let original_id = original.id(); + let duplicate = store.insert_account(duplicate).await.unwrap(); + let duplicate_id = duplicate.id(); + assert_ne!(original_id, duplicate_id); + let result = futures::future::join_all(vec![ + store.get_account_from_http_auth( + &Username::from_str("alice").unwrap(), + "incoming_auth_token", + ), + store.get_account_from_http_auth( + &Username::from_str("charlie").unwrap(), + "incoming_auth_token", + ), + ]) + .await; + let accs: Vec<_> = result.into_iter().map(|r| r.unwrap()).collect(); + // Alice and Charlie had the same auth token, but they had a + // different username/account id, so no problem. 
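+ // both lookups succeed and each resolves to its own distinct account id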
+ assert_ne!(accs[0].id(), accs[1].id()); + assert_eq!(accs[0].id(), original_id); + assert_eq!(accs[1].id(), duplicate_id); +} diff --git a/crates/interledger-store/tests/redis/rate_limiting_test.rs b/crates/interledger-store/tests/redis/rate_limiting_test.rs index 178e4f0e0..6e6b1b5cf 100644 --- a/crates/interledger-store/tests/redis/rate_limiting_test.rs +++ b/crates/interledger-store/tests/redis/rate_limiting_test.rs @@ -1,98 +1,80 @@ use super::{fixtures::*, store_helpers::*}; -use futures::future::{join_all, Future}; +use futures::future::join_all; use interledger_service::AddressStore; use interledger_service_util::{RateLimitError, RateLimitStore}; use interledger_store::account::Account; use uuid::Uuid; -#[test] -fn rate_limits_number_of_packets() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account = Account::try_from( - Uuid::new_v4(), - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) - .unwrap(); - join_all(vec![ - store.clone().apply_rate_limits(account.clone(), 10), - store.clone().apply_rate_limits(account.clone(), 10), - store.clone().apply_rate_limits(account.clone(), 10), - ]) - .then(move |result| { - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), RateLimitError::PacketLimitExceeded); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn rate_limits_number_of_packets() { + let (store, _context, _) = test_store().await.unwrap(); + let account = Account::try_from( + Uuid::new_v4(), + ACCOUNT_DETAILS_0.clone(), + store.get_ilp_address(), + ) + .unwrap(); + let results = join_all(vec![ + store.clone().apply_rate_limits(account.clone(), 10), + store.clone().apply_rate_limits(account.clone(), 10), + store.clone().apply_rate_limits(account.clone(), 10), + ]) + .await; + // The first 2 calls succeed, while the 3rd one hits the rate limit error + // because the account is only allowed 2 packets per minute + assert_eq!( + results, + vec![Ok(()), Ok(()), Err(RateLimitError::PacketLimitExceeded)] + ); } -#[test] -fn limits_amount_throughput() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account = Account::try_from( - Uuid::new_v4(), - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) - .unwrap(); - join_all(vec![ - store.clone().apply_rate_limits(account.clone(), 500), - store.clone().apply_rate_limits(account.clone(), 500), - store.clone().apply_rate_limits(account.clone(), 1), - ]) - .then(move |result| { - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), RateLimitError::ThroughputLimitExceeded); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn limits_amount_throughput() { + let (store, _context, _) = test_store().await.unwrap(); + let account = Account::try_from( + Uuid::new_v4(), + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + let results = join_all(vec![ + store.clone().apply_rate_limits(account.clone(), 500), + store.clone().apply_rate_limits(account.clone(), 500), + store.clone().apply_rate_limits(account.clone(), 1), + ]) + .await; + // The first 2 calls succeed, while the 3rd one hits the rate limit error + // because the account is only allowed 1000 units of currency per minute + assert_eq!( + results, + vec![Ok(()), Ok(()), Err(RateLimitError::ThroughputLimitExceeded)] + ); } -#[test] -fn refunds_throughput_limit_for_rejected_packets() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account = Account::try_from( - Uuid::new_v4(), - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) 
+#[tokio::test] +async fn refunds_throughput_limit_for_rejected_packets() { + let (store, _context, _) = test_store().await.unwrap(); + let account = Account::try_from( + Uuid::new_v4(), + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + + join_all(vec![ + store.clone().apply_rate_limits(account.clone(), 500), + store.clone().apply_rate_limits(account.clone(), 500), + ]) + .await; + + // We refund the throughput limit once, meaning we can do 1 more call before + // the error + store + .refund_throughput_limit(account.clone(), 500) + .await .unwrap(); - join_all(vec![ - store.clone().apply_rate_limits(account.clone(), 500), - store.clone().apply_rate_limits(account.clone(), 500), - ]) - .map_err(|err| panic!(err)) - .and_then(move |_| { - let store_clone = store.clone(); - let account_clone = account.clone(); - store - .clone() - .refund_throughput_limit(account.clone(), 500) - .and_then(move |_| { - store - .clone() - .apply_rate_limits(account.clone(), 500) - .map_err(|err| panic!(err)) - }) - .and_then(move |_| { - store_clone - .apply_rate_limits(account_clone, 1) - .then(move |result| { - assert!(result.is_err()); - assert_eq!( - result.unwrap_err(), - RateLimitError::ThroughputLimitExceeded - ); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() + store.apply_rate_limits(account.clone(), 500).await.unwrap(); + + let result = store.apply_rate_limits(account.clone(), 1).await; + assert_eq!(result.unwrap_err(), RateLimitError::ThroughputLimitExceeded); } diff --git a/crates/interledger-store/tests/redis/rates_test.rs b/crates/interledger-store/tests/redis/rates_test.rs index ce7a99d65..c0a52da41 100644 --- a/crates/interledger-store/tests/redis/rates_test.rs +++ b/crates/interledger-store/tests/redis/rates_test.rs @@ -1,27 +1,22 @@ use super::store_helpers::*; -use futures::future::Future; + use interledger_service_util::ExchangeRateStore; -#[test] -fn set_rates() { - block_on(test_store().and_then(|(store, context, _accs)| { - let store_clone = store.clone(); - let rates = store.get_exchange_rates(&["ABC", "XYZ"]); - assert!(rates.is_err()); - store - .set_exchange_rates( - [("ABC".to_string(), 500.0), ("XYZ".to_string(), 0.005)] - .iter() - .cloned() - .collect(), - ) - .and_then(move |_| { - let rates = store_clone.get_exchange_rates(&["XYZ", "ABC"]).unwrap(); - assert_eq!(rates[0].to_string(), "0.005"); - assert_eq!(rates[1].to_string(), "500"); - let _ = context; - Ok(()) - }) - })) - .unwrap(); +#[tokio::test] +async fn set_rates() { + let (store, _context, _) = test_store().await.unwrap(); + let rates = store.get_exchange_rates(&["ABC", "XYZ"]); + assert!(rates.is_err()); + store + .set_exchange_rates( + [("ABC".to_string(), 500.0), ("XYZ".to_string(), 0.005)] + .iter() + .cloned() + .collect(), + ) + .unwrap(); + + let rates = store.get_exchange_rates(&["XYZ", "ABC"]).unwrap(); + assert_eq!(rates[0].to_string(), "0.005"); + assert_eq!(rates[1].to_string(), "500"); } diff --git a/crates/interledger-store/tests/redis/redis_tests.rs b/crates/interledger-store/tests/redis/redis_tests.rs index c0910f487..b8f024943 100644 --- a/crates/interledger-store/tests/redis/redis_tests.rs +++ b/crates/interledger-store/tests/redis/redis_tests.rs @@ -8,6 +8,7 @@ mod routing_test; mod settlement_test; mod fixtures { + use interledger_api::AccountDetails; use interledger_packet::Address; use interledger_service::Username; @@ -88,7 +89,6 @@ mod redis_helpers { // Copied from 
https://github.com/mitsuhiko/redis-rs/blob/9a1777e8a90c82c315a481cdf66beb7d69e681a2/tests/support/mod.rs #![allow(dead_code)] - use futures::Future; use redis_crate::{self, RedisError}; use std::env; use std::fs; @@ -97,6 +97,8 @@ mod redis_helpers { use std::thread::sleep; use std::time::Duration; + use futures::future::TryFutureExt; + #[derive(PartialEq)] enum ServerType { Tcp, @@ -247,22 +249,21 @@ mod redis_helpers { self.client.get_connection().unwrap() } - pub fn async_connection( - &self, - ) -> impl Future { + pub async fn async_connection(&self) -> Result { self.client .get_async_connection() .map_err(|err| panic!(err)) + .await } pub fn stop_server(&mut self) { self.server.stop(); } - pub fn shared_async_connection( + pub async fn shared_async_connection( &self, - ) -> impl Future { - self.client.get_shared_async_connection() + ) -> Result { + self.client.get_multiplexed_tokio_connection().await } } } @@ -270,8 +271,7 @@ mod redis_helpers { mod store_helpers { use super::fixtures::*; use super::redis_helpers::*; - use env_logger; - use futures::Future; + use interledger_api::NodeStore; use interledger_packet::Address; use interledger_service::{Account as AccountTrait, AddressStore}; @@ -282,55 +282,37 @@ mod store_helpers { use lazy_static::lazy_static; use parking_lot::Mutex; use std::str::FromStr; - use tokio::runtime::Runtime; lazy_static! { static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); } - pub fn test_store() -> impl Future), Error = ()> { + pub async fn test_store() -> Result<(RedisStore, TestContext, Vec), ()> { let context = TestContext::new(); - RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) + let store = RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) .node_ilp_address(Address::from_str("example.node").unwrap()) .connect() - .and_then(|store| { - let store_clone = store.clone(); - let mut accs = Vec::new(); - store - .clone() - .insert_account(ACCOUNT_DETAILS_0.clone()) - .and_then(move |acc| { - accs.push(acc.clone()); - // alice is a Parent, so the store's ilp address is updated to - // the value that would be received by the ILDCP request. here, - // we just assume alice appended some data to her address - store - .clone() - .set_ilp_address(acc.ilp_address().with_suffix(b"user1").unwrap()) - .and_then(move |_| { - store_clone - .insert_account(ACCOUNT_DETAILS_1.clone()) - .and_then(move |acc| { - accs.push(acc.clone()); - Ok((store, context, accs)) - }) - }) - }) - }) - } + .await + .unwrap(); + let mut accs = Vec::new(); + let acc = store + .insert_account(ACCOUNT_DETAILS_0.clone()) + .await + .unwrap(); + accs.push(acc.clone()); + // alice is a Parent, so the store's ilp address is updated to + // the value that would be received by the ILDCP request. 
here, + // we just assume alice appended some data to her address + store + .set_ilp_address(acc.ilp_address().with_suffix(b"user1").unwrap()) + .await + .unwrap(); - pub fn block_on(f: F) -> Result - where - F: Future + Send + 'static, - F::Item: Send, - F::Error: Send, - { - // Only run one test at a time - let _ = env_logger::try_init(); - let lock = TEST_MUTEX.lock(); - let mut runtime = Runtime::new().unwrap(); - let result = runtime.block_on(f); - drop(lock); - result + let acc = store + .insert_account(ACCOUNT_DETAILS_1.clone()) + .await + .unwrap(); + accs.push(acc); + Ok((store, context, accs)) } } diff --git a/crates/interledger-store/tests/redis/routing_test.rs b/crates/interledger-store/tests/redis/routing_test.rs index 46cadc1eb..480800798 100644 --- a/crates/interledger-store/tests/redis/routing_test.rs +++ b/crates/interledger-store/tests/redis/routing_test.rs @@ -1,5 +1,5 @@ use super::{fixtures::*, redis_helpers::*, store_helpers::*}; -use futures::future::Future; + use interledger_api::{AccountDetails, NodeStore}; use interledger_ccp::RouteManagerStore; use interledger_packet::Address; @@ -8,378 +8,266 @@ use interledger_service::{Account as AccountTrait, AddressStore, Username}; use interledger_store::{account::Account, redis::RedisStoreBuilder}; use std::str::FromStr; use std::{collections::HashMap, time::Duration}; -use tokio_timer::sleep; use uuid::Uuid; -#[test] -fn polls_for_route_updates() { +#[tokio::test] +async fn polls_for_route_updates() { let context = TestContext::new(); - block_on( - RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) - .poll_interval(1) - .node_ilp_address(Address::from_str("example.node").unwrap()) - .connect() - .and_then(|store| { - let connection = context.async_connection(); - assert_eq!(store.routing_table().len(), 0); - let store_clone_1 = store.clone(); - let store_clone_2 = store.clone(); - store - .clone() - .insert_account(ACCOUNT_DETAILS_0.clone()) - .and_then(move |alice| { - let routing_table = store_clone_1.routing_table(); - assert_eq!(routing_table.len(), 1); - assert_eq!(*routing_table.get("example.alice").unwrap(), alice.id()); - store_clone_1 - .insert_account(AccountDetails { - ilp_address: Some(Address::from_str("example.bob").unwrap()), - username: Username::from_str("bob").unwrap(), - asset_scale: 6, - asset_code: "XYZ".to_string(), - max_packet_amount: 1000, - min_balance: Some(-1000), - ilp_over_http_url: None, - ilp_over_http_incoming_token: None, - ilp_over_http_outgoing_token: None, - ilp_over_btp_url: None, - ilp_over_btp_outgoing_token: None, - ilp_over_btp_incoming_token: None, - settle_threshold: None, - settle_to: None, - routing_relation: Some("Peer".to_owned()), - round_trip_time: None, - amount_per_minute_limit: None, - packets_per_minute_limit: None, - settlement_engine_url: None, - }) - .and_then(move |bob| { - let routing_table = store_clone_2.routing_table(); - assert_eq!(routing_table.len(), 2); - assert_eq!(*routing_table.get("example.bob").unwrap(), bob.id(),); - let alice_id = alice.id(); - let bob_id = bob.id(); - connection - .map_err(|err| panic!(err)) - .and_then(move |connection| { - redis_crate::cmd("HMSET") - .arg("routes:current") - .arg("example.alice") - .arg(bob_id.to_string()) - .arg("example.charlie") - .arg(alice_id.to_string()) - .query_async(connection) - .and_then( - |(_connection, _result): ( - _, - redis_crate::Value, - )| { - Ok(()) - }, - ) - .map_err(|err| panic!(err)) - .and_then(|_| { - sleep(Duration::from_millis(10)).then(|_| Ok(())) - }) - }) - 
.and_then(move |_| { - let routing_table = store_clone_2.routing_table(); - assert_eq!(routing_table.len(), 3); - assert_eq!( - *routing_table.get("example.alice").unwrap(), - bob_id - ); - assert_eq!( - *routing_table.get("example.bob").unwrap(), - bob.id(), - ); - assert_eq!( - *routing_table.get("example.charlie").unwrap(), - alice_id, - ); - assert!(routing_table.get("example.other").is_none()); - let _ = context; - Ok(()) - }) - }) - }) - }), - ) - .unwrap(); + let store = RedisStoreBuilder::new(context.get_client_connection_info(), [0; 32]) + .poll_interval(1) + .node_ilp_address(Address::from_str("example.node").unwrap()) + .connect() + .await + .unwrap(); + + let connection = context.async_connection(); + assert_eq!(store.routing_table().len(), 0); + let store_clone_1 = store.clone(); + let store_clone_2 = store.clone(); + let alice = store + .insert_account(ACCOUNT_DETAILS_0.clone()) + .await + .unwrap(); + let routing_table = store_clone_1.routing_table(); + assert_eq!(routing_table.len(), 1); + assert_eq!(*routing_table.get("example.alice").unwrap(), alice.id()); + let bob = store_clone_1 + .insert_account(AccountDetails { + ilp_address: Some(Address::from_str("example.bob").unwrap()), + username: Username::from_str("bob").unwrap(), + asset_scale: 6, + asset_code: "XYZ".to_string(), + max_packet_amount: 1000, + min_balance: Some(-1000), + ilp_over_http_url: None, + ilp_over_http_incoming_token: None, + ilp_over_http_outgoing_token: None, + ilp_over_btp_url: None, + ilp_over_btp_outgoing_token: None, + ilp_over_btp_incoming_token: None, + settle_threshold: None, + settle_to: None, + routing_relation: Some("Peer".to_owned()), + round_trip_time: None, + amount_per_minute_limit: None, + packets_per_minute_limit: None, + settlement_engine_url: None, + }) + .await + .unwrap(); + + let routing_table = store_clone_2.routing_table(); + assert_eq!(routing_table.len(), 2); + assert_eq!(*routing_table.get("example.bob").unwrap(), bob.id(),); + let alice_id = alice.id(); + let bob_id = bob.id(); + let mut connection = connection.await.unwrap(); + let _: redis_crate::Value = redis_crate::cmd("HMSET") + .arg("routes:current") + .arg("example.alice") + .arg(bob_id.to_string()) + .arg("example.charlie") + .arg(alice_id.to_string()) + .query_async(&mut connection) + .await + .unwrap(); + + tokio::time::delay_for(Duration::from_millis(10)).await; + let routing_table = store_clone_2.routing_table(); + assert_eq!(routing_table.len(), 3); + assert_eq!(*routing_table.get("example.alice").unwrap(), bob_id); + assert_eq!(*routing_table.get("example.bob").unwrap(), bob.id(),); + assert_eq!(*routing_table.get("example.charlie").unwrap(), alice_id,); + assert!(routing_table.get("example.other").is_none()); } -#[test] -fn gets_accounts_to_send_routes_to() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_accounts_to_send_routes_to(Vec::new()) - .and_then(move |accounts| { - // We send to child accounts but not parents - assert_eq!(accounts[0].username().as_ref(), "bob"); - assert_eq!(accounts.len(), 1); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_accounts_to_send_routes_to() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store + .get_accounts_to_send_routes_to(Vec::new()) + .await + .unwrap(); + // We send to child accounts but not parents + assert_eq!(accounts[0].username().as_ref(), "bob"); + assert_eq!(accounts.len(), 1); } -#[test] -fn gets_accounts_to_send_routes_to_and_skips_ignored() { - 
block_on(test_store().and_then(|(store, context, accs)| { - store - .get_accounts_to_send_routes_to(vec![accs[1].id()]) - .and_then(move |accounts| { - assert!(accounts.is_empty()); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_accounts_to_send_routes_to_and_skips_ignored() { + let (store, _context, accs) = test_store().await.unwrap(); + let accounts = store + .get_accounts_to_send_routes_to(vec![accs[1].id()]) + .await + .unwrap(); + assert!(accounts.is_empty()); } -#[test] -fn gets_accounts_to_receive_routes_from() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_accounts_to_receive_routes_from() - .and_then(move |accounts| { - assert_eq!( - *accounts[0].ilp_address(), - Address::from_str("example.alice").unwrap() - ); - assert_eq!(accounts.len(), 1); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_accounts_to_receive_routes_from() { + let (store, _context, _) = test_store().await.unwrap(); + let accounts = store.get_accounts_to_receive_routes_from().await.unwrap(); + assert_eq!( + *accounts[0].ilp_address(), + Address::from_str("example.alice").unwrap() + ); } -#[test] -fn gets_local_and_configured_routes() { - block_on(test_store().and_then(|(store, context, _accs)| { - store - .get_local_and_configured_routes() - .and_then(move |(local, configured)| { - assert_eq!(local.len(), 2); - assert!(configured.is_empty()); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn gets_local_and_configured_routes() { + let (store, _context, _) = test_store().await.unwrap(); + let (local, configured) = store.get_local_and_configured_routes().await.unwrap(); + assert_eq!(local.len(), 2); + assert!(configured.is_empty()); } -#[test] -fn saves_routes_to_db() { - block_on(test_store().and_then(|(mut store, context, _accs)| { - let get_connection = context.async_connection(); - let account0_id = Uuid::new_v4(); - let account1_id = Uuid::new_v4(); - let account0 = Account::try_from( - account0_id, - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) +#[tokio::test] +async fn saves_routes_to_db() { + let (store, context, _) = test_store().await.unwrap(); + let get_connection = context.async_connection(); + let account0_id = Uuid::new_v4(); + let account1_id = Uuid::new_v4(); + let account0 = Account::try_from( + account0_id, + ACCOUNT_DETAILS_0.clone(), + store.get_ilp_address(), + ) + .unwrap(); + + let account1 = Account::try_from( + account1_id, + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + + store + .clone() + .set_routes(vec![ + ("example.a".to_string(), account0.clone()), + ("example.b".to_string(), account0.clone()), + ("example.c".to_string(), account1.clone()), + ]) + .await .unwrap(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) + let mut connection = get_connection.await.unwrap(); + let routes: HashMap = redis_crate::cmd("HGETALL") + .arg("routes:current") + .query_async(&mut connection) + .await .unwrap(); + assert_eq!(routes["example.a"], account0_id.to_string()); + assert_eq!(routes["example.b"], account0_id.to_string()); + assert_eq!(routes["example.c"], account1_id.to_string()); + assert_eq!(routes.len(), 3); - store - .set_routes(vec![ - ("example.a".to_string(), account0.clone()), - ("example.b".to_string(), account0.clone()), - ("example.c".to_string(), account1.clone()), - ]) - .and_then(move |_| { - get_connection.and_then(move |connection| { - 
redis_crate::cmd("HGETALL") - .arg("routes:current") - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_conn, routes): (_, HashMap)| { - assert_eq!(routes["example.a"], account0_id.to_string()); - assert_eq!(routes["example.b"], account0_id.to_string()); - assert_eq!(routes["example.c"], account1_id.to_string()); - assert_eq!(routes.len(), 3); - Ok(()) - }) - }) - }) - .and_then(move |_| { - let _ = context; - Ok(()) - }) - })) - .unwrap() + // local routing table routes are also updated + let routes = store.routing_table(); + assert_eq!(routes["example.a"], account0_id); + assert_eq!(routes["example.b"], account0_id); + assert_eq!(routes["example.c"], account1_id); + assert_eq!(routes.len(), 3); } -#[test] -fn updates_local_routes() { - block_on(test_store().and_then(|(store, context, _accs)| { - let account0_id = Uuid::new_v4(); - let account1_id = Uuid::new_v4(); - let account0 = Account::try_from( - account0_id, - ACCOUNT_DETAILS_0.clone(), - store.get_ilp_address(), - ) +#[tokio::test] +async fn adds_static_routes_to_redis() { + let (store, context, accs) = test_store().await.unwrap(); + let get_connection = context.async_connection(); + store + .set_static_routes(vec![ + ("example.a".to_string(), accs[0].id()), + ("example.b".to_string(), accs[0].id()), + ("example.c".to_string(), accs[1].id()), + ]) + .await .unwrap(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) + let mut connection = get_connection.await.unwrap(); + let routes: HashMap = redis_crate::cmd("HGETALL") + .arg("routes:static") + .query_async(&mut connection) + .await .unwrap(); - store - .clone() - .set_routes(vec![ - ("example.a".to_string(), account0.clone()), - ("example.b".to_string(), account0.clone()), - ("example.c".to_string(), account1.clone()), - ]) - .and_then(move |_| { - let routes = store.routing_table(); - assert_eq!(routes["example.a"], account0_id); - assert_eq!(routes["example.b"], account0_id); - assert_eq!(routes["example.c"], account1_id); - assert_eq!(routes.len(), 3); - Ok(()) - }) - .and_then(move |_| { - let _ = context; - Ok(()) - }) - })) - .unwrap() + assert_eq!(routes["example.a"], accs[0].id().to_string()); + assert_eq!(routes["example.b"], accs[0].id().to_string()); + assert_eq!(routes["example.c"], accs[1].id().to_string()); + assert_eq!(routes.len(), 3); } -#[test] -fn adds_static_routes_to_redis() { - block_on(test_store().and_then(|(store, context, accs)| { - let get_connection = context.async_connection(); - store - .clone() - .set_static_routes(vec![ - ("example.a".to_string(), accs[0].id()), - ("example.b".to_string(), accs[0].id()), - ("example.c".to_string(), accs[1].id()), - ]) - .and_then(move |_| { - get_connection.and_then(|connection| { - redis_crate::cmd("HGETALL") - .arg("routes:static") - .query_async(connection) - .map_err(|err| panic!(err)) - .and_then(move |(_, routes): (_, HashMap)| { - assert_eq!(routes["example.a"], accs[0].id().to_string()); - assert_eq!(routes["example.b"], accs[0].id().to_string()); - assert_eq!(routes["example.c"], accs[1].id().to_string()); - assert_eq!(routes.len(), 3); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() -} +#[tokio::test] +async fn static_routes_override_others() { + let (store, _context, accs) = test_store().await.unwrap(); + store + .set_static_routes(vec![ + ("example.a".to_string(), accs[0].id()), + ("example.b".to_string(), accs[0].id()), + ]) + .await + .unwrap(); + + let account1_id = Uuid::new_v4(); + let account1 = 
Account::try_from( + account1_id, + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + store + .clone() + .set_routes(vec![ + ("example.a".to_string(), account1.clone()), + ("example.b".to_string(), account1.clone()), + ("example.c".to_string(), account1), + ]) + .await + .unwrap(); -#[test] -fn static_routes_override_others() { - block_on(test_store().and_then(|(store, context, accs)| { - let mut store_clone = store.clone(); - store - .clone() - .set_static_routes(vec![ - ("example.a".to_string(), accs[0].id()), - ("example.b".to_string(), accs[0].id()), - ]) - .and_then(move |_| { - let account1_id = Uuid::new_v4(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) - .unwrap(); - store_clone - .set_routes(vec![ - ("example.a".to_string(), account1.clone()), - ("example.b".to_string(), account1.clone()), - ("example.c".to_string(), account1), - ]) - .and_then(move |_| { - let routes = store.routing_table(); - assert_eq!(routes["example.a"], accs[0].id()); - assert_eq!(routes["example.b"], accs[0].id()); - assert_eq!(routes["example.c"], account1_id); - assert_eq!(routes.len(), 3); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap() + let routes = store.routing_table(); + assert_eq!(routes["example.a"], accs[0].id()); + assert_eq!(routes["example.b"], accs[0].id()); + assert_eq!(routes["example.c"], account1_id); + assert_eq!(routes.len(), 3); } -#[test] -fn default_route() { - block_on(test_store().and_then(|(store, context, accs)| { - let mut store_clone = store.clone(); - store - .clone() - .set_default_route(accs[0].id()) - .and_then(move |_| { - let account1_id = Uuid::new_v4(); - let account1 = Account::try_from( - account1_id, - ACCOUNT_DETAILS_1.clone(), - store.get_ilp_address(), - ) - .unwrap(); - store_clone - .set_routes(vec![ - ("example.a".to_string(), account1.clone()), - ("example.b".to_string(), account1.clone()), - ]) - .and_then(move |_| { - let routes = store.routing_table(); - assert_eq!(routes[""], accs[0].id()); - assert_eq!(routes["example.a"], account1_id); - assert_eq!(routes["example.b"], account1_id); - assert_eq!(routes.len(), 3); - let _ = context; - Ok(()) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn default_route() { + let (store, _context, accs) = test_store().await.unwrap(); + store.set_default_route(accs[0].id()).await.unwrap(); + let account1_id = Uuid::new_v4(); + let account1 = Account::try_from( + account1_id, + ACCOUNT_DETAILS_1.clone(), + store.get_ilp_address(), + ) + .unwrap(); + store + .clone() + .set_routes(vec![ + ("example.a".to_string(), account1.clone()), + ("example.b".to_string(), account1.clone()), + ]) + .await + .unwrap(); + + let routes = store.routing_table(); + assert_eq!(routes[""], accs[0].id()); + assert_eq!(routes["example.a"], account1_id); + assert_eq!(routes["example.b"], account1_id); + assert_eq!(routes.len(), 3); } -#[test] -fn returns_configured_routes_for_route_manager() { - block_on(test_store().and_then(|(store, context, accs)| { - store - .clone() - .set_static_routes(vec![ - ("example.a".to_string(), accs[0].id()), - ("example.b".to_string(), accs[1].id()), - ]) - .and_then(move |_| store.get_local_and_configured_routes()) - .and_then(move |(_local, configured)| { - assert_eq!(configured.len(), 2); - assert_eq!(configured["example.a"].id(), accs[0].id()); - assert_eq!(configured["example.b"].id(), accs[1].id()); - let _ = context; - Ok(()) - }) - })) - .unwrap() +#[tokio::test] +async fn 
returns_configured_routes_for_route_manager() { + let (store, _context, accs) = test_store().await.unwrap(); + store + .set_static_routes(vec![ + ("example.a".to_string(), accs[0].id()), + ("example.b".to_string(), accs[1].id()), + ]) + .await + .unwrap(); + let (_, configured) = store.get_local_and_configured_routes().await.unwrap(); + assert_eq!(configured.len(), 2); + assert_eq!(configured["example.a"].id(), accs[0].id()); + assert_eq!(configured["example.b"].id(), accs[1].id()); } diff --git a/crates/interledger-store/tests/redis/settlement_test.rs b/crates/interledger-store/tests/redis/settlement_test.rs index 0d1112215..15ea4c25c 100644 --- a/crates/interledger-store/tests/redis/settlement_test.rs +++ b/crates/interledger-store/tests/redis/settlement_test.rs @@ -1,6 +1,6 @@ use super::store_helpers::*; use bytes::Bytes; -use futures::future::{join_all, Future}; + use http::StatusCode; use interledger_api::NodeStore; use interledger_service::{Account, AccountStore}; @@ -10,7 +10,7 @@ use interledger_settlement::core::{ }; use lazy_static::lazy_static; use num_bigint::BigUint; -use redis_crate::{aio::SharedConnection, cmd}; +use redis_crate::cmd; use url::Url; use uuid::Uuid; @@ -18,387 +18,250 @@ lazy_static! { static ref IDEMPOTENCY_KEY: String = String::from("AJKJNUjM0oyiAN46"); } -#[test] -fn saves_and_gets_uncredited_settlement_amount_properly() { - block_on(test_store().and_then(|(store, context, _accs)| { - let amounts = vec![ - (BigUint::from(5u32), 11), // 5 - (BigUint::from(855u32), 12), // 905 - (BigUint::from(1u32), 10), // 1005 total - ]; - let acc = Uuid::new_v4(); - let mut f = Vec::new(); - for a in amounts { - let s = store.clone(); - f.push(s.save_uncredited_settlement_amount(acc, a)); - } - join_all(f) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .load_uncredited_settlement_amount(acc, 9) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |ret| { - // 1 uncredited unit for scale 9 - assert_eq!(ret, BigUint::from(1u32)); - // rest should be in the leftovers store - store - .get_uncredited_settlement_amount(acc) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |ret| { - // 1 uncredited unit for scale 9 - assert_eq!(ret, (BigUint::from(5u32), 12)); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() -} +#[tokio::test] +async fn saves_gets_clears_uncredited_settlement_amount_properly() { + let (store, _context, _accs) = test_store().await.unwrap(); + let amounts: Vec<(BigUint, u8)> = vec![ + (BigUint::from(5u32), 11), // 5 + (BigUint::from(855u32), 12), // 905 + (BigUint::from(1u32), 10), // 1005 total + ]; + let acc = Uuid::new_v4(); + for a in amounts { + let s = store.clone(); + s.save_uncredited_settlement_amount(acc, a).await.unwrap(); + } + let ret = store + .load_uncredited_settlement_amount(acc, 9u8) + .await + .unwrap(); + // 1 uncredited unit for scale 9 + assert_eq!(ret, BigUint::from(1u32)); + // rest should be in the leftovers store + let ret = store.get_uncredited_settlement_amount(acc).await.unwrap(); + // 1 uncredited unit for scale 9 + assert_eq!(ret, (BigUint::from(5u32), 12)); -#[test] -fn clears_uncredited_settlement_amount_properly() { - block_on(test_store().and_then(|(store, context, _accs)| { - let amounts = vec![ - (BigUint::from(5u32), 11), // 5 - (BigUint::from(855u32), 12), // 905 - (BigUint::from(1u32), 10), // 1005 total - ]; - let acc = Uuid::new_v4(); - let mut f = Vec::new(); - for a in amounts { - let s = store.clone(); - 
f.push(s.save_uncredited_settlement_amount(acc, a)); - } - join_all(f) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .clear_uncredited_settlement_amount(acc) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .get_uncredited_settlement_amount(acc) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |amount| { - assert_eq!(amount, (BigUint::from(0u32), 0)); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() + // clears uncredited amount + store.clear_uncredited_settlement_amount(acc).await.unwrap(); + let ret = store.get_uncredited_settlement_amount(acc).await.unwrap(); + assert_eq!(ret, (BigUint::from(0u32), 0)); } -#[test] -fn credits_prepaid_amount() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context.async_connection().and_then(move |conn| { - store - .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |(_conn, (balance, prepaid_amount)): (_, (i64, i64))| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 100); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn credits_prepaid_amount() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.async_connection().await.unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 100); } -#[test] -fn saves_and_loads_idempotency_key_data_properly() { - block_on(test_store().and_then(|(store, context, _accs)| { - let input_hash: [u8; 32] = Default::default(); - store - .save_idempotent_data( - IDEMPOTENCY_KEY.clone(), - input_hash, - StatusCode::OK, - Bytes::from("TEST"), - ) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |_| { - store - .load_idempotent_data(IDEMPOTENCY_KEY.clone()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data1| { - assert_eq!( - data1.unwrap(), - IdempotentData::new(StatusCode::OK, Bytes::from("TEST"), input_hash) - ); - let _ = context; +#[tokio::test] +async fn saves_and_loads_idempotency_key_data_properly() { + let (store, _context, _) = test_store().await.unwrap(); + let input_hash: [u8; 32] = Default::default(); + store + .save_idempotent_data( + IDEMPOTENCY_KEY.clone(), + input_hash, + StatusCode::OK, + Bytes::from("TEST"), + ) + .await + .unwrap(); + let data1 = store + .load_idempotent_data(IDEMPOTENCY_KEY.clone()) + .await + .unwrap(); + assert_eq!( + data1.unwrap(), + IdempotentData::new(StatusCode::OK, Bytes::from("TEST"), input_hash) + ); - store - .load_idempotent_data("asdf".to_string()) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |data2| { - assert!(data2.is_none()); - let _ = context; - Ok(()) - }) - }) - }) - })) - .unwrap(); + let data2 = store + .load_idempotent_data("asdf".to_string()) + .await + .unwrap(); + assert!(data2.is_none()); } -#[test] -fn idempotent_settlement_calls() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = 
accs[0].id(); - context.async_connection().and_then(move |conn| { - store - .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then(move |(conn, (balance, prepaid_amount)): (_, (i64, i64))| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 100); +#[tokio::test] +async fn idempotent_settlement_calls() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.async_connection().await.unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 100); - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), // Reuse key to make idempotent request. - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| eprintln!("Redis error: {:?}", err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - // Since it's idempotent there - // will be no state update. - // Otherwise it'd be 200 (100 + 100) - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 100); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - }) - })) - .unwrap() + store + .update_balance_for_incoming_settlement( + id, + 100, + Some(IDEMPOTENCY_KEY.clone()), // Reuse key to make idempotent request. + ) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + // Since it's idempotent there + // will be no state update. 
+ // Otherwise it'd be 200 (100 + 100) + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 100); } -#[test] -fn credits_balance_owed() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context - .shared_async_connection() - .map_err(|err| panic!(err)) - .and_then(move |conn| { - cmd("HSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(-200) - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then(move |(conn, _balance): (SharedConnection, i64)| { - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - assert_eq!(balance, -100); - assert_eq!(prepaid_amount, 0); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn credits_balance_owed() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.shared_async_connection().await.unwrap(); + let _balance: i64 = cmd("HSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(-200i64) + .query_async(&mut conn) + .await + .unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, -100); + assert_eq!(prepaid_amount, 0); } -#[test] -fn clears_balance_owed() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context - .shared_async_connection() - .map_err(|err| panic!(err)) - .and_then(move |conn| { - cmd("HSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(-100) - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then(move |(conn, _balance): (SharedConnection, i64)| { - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 0); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn clears_balance_owed() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.shared_async_connection().await.unwrap(); + let _balance: i64 = cmd("HSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(-100i64) + .query_async(&mut conn) + .await + .unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 0); } -#[test] -fn clears_balance_owed_and_puts_remainder_as_prepaid() { - block_on(test_store().and_then(|(store, context, accs)| { - let id = accs[0].id(); - context - .shared_async_connection() - .map_err(|err| panic!(err)) - 
.and_then(move |conn| { - cmd("HSET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg(-40) - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then(move |(conn, _balance): (SharedConnection, i64)| { - store - .update_balance_for_incoming_settlement( - id, - 100, - Some(IDEMPOTENCY_KEY.clone()), - ) - .and_then(move |_| { - cmd("HMGET") - .arg(format!("accounts:{}", id)) - .arg("balance") - .arg("prepaid_amount") - .query_async(conn) - .map_err(|err| panic!(err)) - .and_then( - move |(_conn, (balance, prepaid_amount)): ( - _, - (i64, i64), - )| { - assert_eq!(balance, 0); - assert_eq!(prepaid_amount, 60); - let _ = context; - Ok(()) - }, - ) - }) - }) - }) - })) - .unwrap() +#[tokio::test] +async fn clears_balance_owed_and_puts_remainder_as_prepaid() { + let (store, context, accs) = test_store().await.unwrap(); + let id = accs[0].id(); + let mut conn = context.shared_async_connection().await.unwrap(); + let _balance: i64 = cmd("HSET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg(-40) + .query_async(&mut conn) + .await + .unwrap(); + store + .update_balance_for_incoming_settlement(id, 100, Some(IDEMPOTENCY_KEY.clone())) + .await + .unwrap(); + let (balance, prepaid_amount): (i64, i64) = cmd("HMGET") + .arg(format!("accounts:{}", id)) + .arg("balance") + .arg("prepaid_amount") + .query_async(&mut conn) + .await + .unwrap(); + assert_eq!(balance, 0); + assert_eq!(prepaid_amount, 60); } -#[test] -fn loads_globally_configured_settlement_engine_url() { - block_on(test_store().and_then(|(store, context, accs)| { - assert!(accs[0].settlement_engine_details().is_some()); - assert!(accs[1].settlement_engine_details().is_none()); - let account_ids = vec![accs[0].id(), accs[1].id()]; - store - .clone() - .get_accounts(account_ids.clone()) - .and_then(move |accounts| { - assert!(accounts[0].settlement_engine_details().is_some()); - assert!(accounts[1].settlement_engine_details().is_none()); +#[tokio::test] +async fn loads_globally_configured_settlement_engine_url() { + let (store, _context, accs) = test_store().await.unwrap(); + assert!(accs[0].settlement_engine_details().is_some()); + assert!(accs[1].settlement_engine_details().is_none()); + let account_ids = vec![accs[0].id(), accs[1].id()]; + let accounts = store.get_accounts(account_ids.clone()).await.unwrap(); + assert!(accounts[0].settlement_engine_details().is_some()); + assert!(accounts[1].settlement_engine_details().is_none()); - store - .clone() - .set_settlement_engines(vec![ - ( - "ABC".to_string(), - Url::parse("http://settle-abc.example").unwrap(), - ), - ( - "XYZ".to_string(), - Url::parse("http://settle-xyz.example").unwrap(), - ), - ]) - .and_then(move |_| { - store.get_accounts(account_ids).and_then(move |accounts| { - // It should not overwrite the one that was individually configured - assert_eq!( - accounts[0] - .settlement_engine_details() - .unwrap() - .url - .as_str(), - "http://settlement.example/" - ); + store + .clone() + .set_settlement_engines(vec![ + ( + "ABC".to_string(), + Url::parse("http://settle-abc.example").unwrap(), + ), + ( + "XYZ".to_string(), + Url::parse("http://settle-xyz.example").unwrap(), + ), + ]) + .await + .unwrap(); + let accounts = store.get_accounts(account_ids).await.unwrap(); + // It should not overwrite the one that was individually configured + assert_eq!( + accounts[0] + .settlement_engine_details() + .unwrap() + .url + .as_str(), + "http://settlement.example/" + ); - // It should set the URL for the account that did not have one configured - 
assert!(accounts[1].settlement_engine_details().is_some()); - assert_eq!( - accounts[1] - .settlement_engine_details() - .unwrap() - .url - .as_str(), - "http://settle-abc.example/" - ); - let _ = context; - Ok(()) - }) - }) - // store.set_settlement_engines - }) - })) - .unwrap() + // It should set the URL for the account that did not have one configured + assert!(accounts[1].settlement_engine_details().is_some()); + assert_eq!( + accounts[1] + .settlement_engine_details() + .unwrap() + .url + .as_str(), + "http://settle-abc.example/" + ); } diff --git a/crates/interledger-stream/Cargo.toml b/crates/interledger-stream/Cargo.toml index 62c8fb064..e06d87fda 100644 --- a/crates/interledger-stream/Cargo.toml +++ b/crates/interledger-stream/Cargo.toml @@ -18,7 +18,7 @@ byteorder = { version = "1.3.2", default-features = false } chrono = { version = "0.4.9", default-features = false, features = ["clock"] } csv = { version = "1.1.1", default-features = false, optional = true } failure = { version = "0.1.5", default-features = false, features = ["derive"] } -futures = { version = "0.1.29", default-features = false } +futures = { version = "0.3.1", default-features = false } hex = { version = "0.4.0", default-features = false } interledger-ildcp = { path = "../interledger-ildcp", version = "^0.4.0", default-features = false } interledger-packet = { path = "../interledger-packet", version = "^0.4.0", features = ["serde"], default-features = false } @@ -27,8 +27,10 @@ log = { version = "0.4.8", default-features = false } parking_lot = { version = "0.9.0", default-features = false } ring = { version = "0.16.9", default-features = false } serde = { version = "1.0.101", default-features = false } -tokio = { version = "0.1.22", default-features = false, features = ["rt-full"] } +tokio = { version = "^0.2.6", default-features = false, features = ["rt-core", "macros"] } uuid = { version = "0.8.1", default-features = false, features = ["v4"] } +async-trait = "0.1.22" +pin-project = "0.4.7" [dev-dependencies] interledger-router = { path = "../interledger-router", version = "^0.4.0", default-features = false } diff --git a/crates/interledger-stream/src/client.rs b/crates/interledger-stream/src/client.rs index 4a5502316..2e4e68281 100644 --- a/crates/interledger-stream/src/client.rs +++ b/crates/interledger-stream/src/client.rs @@ -4,7 +4,7 @@ use super::error::Error; use super::packet::*; use bytes::Bytes; use bytes::BytesMut; -use futures::{Async, Future, Poll}; +use futures::{ready, TryFutureExt}; use interledger_ildcp::get_ildcp_info; use interledger_packet::{ Address, ErrorClass, ErrorCode as IlpErrorCode, Fulfill, PacketType as IlpPacketType, @@ -12,6 +12,7 @@ use interledger_packet::{ }; use interledger_service::*; use log::{debug, error, warn}; +use pin_project::{pin_project, project}; use serde::{Deserialize, Serialize}; use std::{ cell::Cell, @@ -19,27 +20,43 @@ use std::{ str, time::{Duration, Instant, SystemTime}, }; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; -// Maximum time we should wait since last fulfill before we error out to avoid -// getting into an infinite loop of sending packets and effectively DoSing ourselves +/// Maximum time we should wait since last fulfill before we error out to avoid +/// getting into an infinite loop of sending packets and effectively DoSing ourselves const MAX_TIME_SINCE_LAST_FULFILL: Duration = Duration::from_secs(30); +/// Metadata about a completed STREAM payment #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] pub 
struct StreamDelivery { + /// The sender's ILP Address pub from: Address, + /// The receiver's ILP Address pub to: Address, // StreamDelivery variables which we know ahead of time + /// The amount sent by the sender pub sent_amount: u64, + /// The sender's asset scale pub sent_asset_scale: u8, + /// The sender's asset code pub sent_asset_code: String, + /// The amount delivered to the receiver pub delivered_amount: u64, // StreamDelivery variables which may get updated if the receiver sends us a // ConnectionAssetDetails frame. + /// The asset scale delivered to the receiver + /// (this may change depending on the granularity of accounts across nodes) pub delivered_asset_scale: Option, + /// The asset code delivered to the receiver (this may happen in cross-currency payments) pub delivered_asset_code: Option, } impl StreamDelivery { + /// Increases the `StreamDelivery`'s [`delivered_amount`](./struct.StreamDelivery.html#structfield.delivered_amount) by `amount` fn increment_delivered_amount(&mut self, amount: u64) { self.delivered_amount += amount; } @@ -48,111 +65,134 @@ impl StreamDelivery { /// Send a given amount of money using the STREAM transport protocol. /// /// This returns the amount delivered, as reported by the receiver and in the receiver's asset's units. -pub fn send_money( +pub async fn send_money( service: S, from_account: &A, destination_account: Address, shared_secret: &[u8], source_amount: u64, -) -> impl Future +) -> Result<(StreamDelivery, S), Error> where - S: IncomingService + Clone, - A: Account, + S: IncomingService + Send + Sync + Clone + 'static, + A: Account + Send + Sync + 'static, { let shared_secret = Bytes::from(shared_secret); let from_account = from_account.clone(); // TODO can/should we avoid cloning the account? - get_ildcp_info(&mut service.clone(), from_account.clone()) + let account_details = get_ildcp_info(&mut service.clone(), from_account.clone()) .map_err(|_err| Error::ConnectionError("Unable to get ILDCP info: {:?}".to_string())) - .and_then(move |account_details| { - let source_account = account_details.ilp_address(); - if source_account.scheme() != destination_account.scheme() { - warn!("Destination ILP address starts with a different scheme prefix (\"{}\') than ours (\"{}\'), this probably isn't going to work", - destination_account.scheme(), - source_account.scheme()); - } + .await?; - SendMoneyFuture { - state: SendMoneyFutureState::SendMoney, - next: Some(service), - from_account: from_account.clone(), - source_account, - destination_account: destination_account.clone(), - shared_secret, - source_amount, - // Try sending the full amount first - // TODO make this configurable -- in different scenarios you might prioritize - // sending as much as possible per packet vs getting money flowing ASAP differently - congestion_controller: CongestionController::new(source_amount, source_amount / 10, 2.0), - pending_requests: Cell::new(Vec::new()), - receipt: StreamDelivery { - from: from_account.ilp_address().clone(), - to: destination_account, - sent_amount: source_amount, - sent_asset_scale: from_account.asset_scale(), - sent_asset_code: from_account.asset_code().to_string(), - delivered_asset_scale: None, - delivered_asset_code: None, - delivered_amount: 0, - }, - should_send_source_account: true, - sequence: 1, - rejected_packets: 0, - error: None, - last_fulfill_time: Instant::now(), - } - }) + let source_account = account_details.ilp_address(); + if source_account.scheme() != destination_account.scheme() { + warn!("Destination ILP address 
starts with a different scheme prefix (\"{}\') than ours (\"{}\'), this probably isn't going to work", + destination_account.scheme(), + source_account.scheme()); + } + + SendMoneyFuture { + state: SendMoneyFutureState::SendMoney, + next: Some(service), + from_account: from_account.clone(), + source_account, + destination_account: destination_account.clone(), + shared_secret, + source_amount, + // Try sending the full amount first + // TODO make this configurable -- in different scenarios you might prioritize + // sending as much as possible per packet vs getting money flowing ASAP differently + congestion_controller: CongestionController::new(source_amount, source_amount / 10, 2.0), + pending_requests: Cell::new(Vec::new()), + receipt: StreamDelivery { + from: from_account.ilp_address().clone(), + to: destination_account, + sent_amount: source_amount, + sent_asset_scale: from_account.asset_scale(), + sent_asset_code: from_account.asset_code().to_string(), + delivered_asset_scale: None, + delivered_asset_code: None, + delivered_amount: 0, + }, + should_send_source_account: true, + sequence: 1, + rejected_packets: 0, + error: None, + last_fulfill_time: Instant::now(), + } + .await } +#[pin_project] +/// Helper data type used to track a streaming payment struct SendMoneyFuture, A: Account> { + /// The future's [state](./enum.SendMoneyFutureState.html) state: SendMoneyFutureState, + /// The next service which will receive the Stream packet next: Option, + /// The account sending the STREAM payment from_account: A, + /// The ILP Address of the account sending the payment source_account: Address, + /// The ILP Address of the account receiving the payment destination_account: Address, + /// The shared secret generated by the sender and the receiver shared_secret: Bytes, + /// The amount sent by the sender source_amount: u64, + /// The [congestion controller](./../congestion/struct.CongestionController.html) for this stream congestion_controller: CongestionController, + /// STREAM packets we have sent and have not received responses yet for pending_requests: Cell>, + /// The [StreamDelivery](./struct.StreamDelivery.html) receipt of this stream receipt: StreamDelivery, + /// Boolean indicating if the source account should also be sent to the receiver should_send_source_account: bool, + /// The sequence number of this stream sequence: u64, + /// The amount of rejected packets by the stream rejected_packets: u64, + /// The STREAM error for this stream error: Option, + /// The last time a packet was fulfilled for this stream. 
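With the conversion above, `send_money` is an ordinary `async fn`, so a caller simply awaits it inside a Tokio context instead of driving the old futures-0.1 `SendMoneyFuture` on a runtime. A minimal sketch of the calling side, assuming the service, account, and shared secret come from the caller's own setup (the destination address here is made up):

```rust
use interledger_packet::Address;
use interledger_service::{Account, IncomingService};
use std::str::FromStr;

// Sketch only: `service`, `account`, and `shared_secret` are placeholders for
// whatever the caller obtained elsewhere (e.g. from an SPSP query).
async fn pay_100_units<S, A>(
    service: S,
    account: A,
    shared_secret: &[u8],
) -> Result<StreamDelivery, Error>
where
    S: IncomingService<A> + Clone + Send + Sync + 'static,
    A: Account + Send + Sync + 'static,
{
    let destination = Address::from_str("example.receiver").unwrap();
    // Await the STREAM payment and get back the receipt plus the service handle.
    let (receipt, _service) =
        send_money(service, &account, destination, shared_secret, 100).await?;
    Ok(receipt)
}
```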
last_fulfill_time: Instant, } struct PendingRequest { sequence: u64, amount: u64, - future: BoxedIlpFuture, + future: Pin + Send>>, } +/// The state of the send money future #[derive(PartialEq)] enum SendMoneyFutureState { - SendMoney, + /// Initial state of the future + SendMoney = 0, + /// Once the stream has been finished, it transitions to this state and tries to send a + /// ConnectionCloseFrame Closing, - // RemoteClosed, + /// The connection is now closed and the send_money function can return Closed, } +#[project] impl SendMoneyFuture where - S: IncomingService, - A: Account, + S: IncomingService + Send + Sync + Clone + 'static, + A: Account + Send + Sync + 'static, { + /// Fire off requests until the congestion controller tells us to stop or we've sent the total amount or maximum time since last fulfill has elapsed fn try_send_money(&mut self) -> Result { - // Fire off requests until the congestion controller tells us to stop or we've sent the total amount or maximum time since last fulfill has elapsed let mut sent_packets = false; loop { let amount = min( - self.source_amount, + *self.source_amount, self.congestion_controller.get_max_amount(), ); if amount == 0 { break; } - self.source_amount -= amount; + *self.source_amount -= amount; // Load up the STREAM packet let sequence = self.next_sequence(); @@ -160,7 +200,7 @@ where stream_id: 1, shares: 1, })]; - if self.should_send_source_account { + if *self.should_send_source_account { frames.push(Frame::ConnectionNewAddress(ConnectionNewAddressFrame { source_account: self.source_account.clone(), })); @@ -193,24 +233,27 @@ where // Send it! self.congestion_controller.prepare(amount); - if let Some(ref mut next) = self.next { - let send_request = next.handle_request(IncomingRequest { - from: self.from_account.clone(), - prepare, + if let Some(ref next) = self.next { + let mut next = next.clone(); + let from = self.from_account.clone(); + let request = Box::pin(async move { + next.handle_request(IncomingRequest { from, prepare }).await }); self.pending_requests.get_mut().push(PendingRequest { sequence, amount, - future: Box::new(send_request), + future: request, }); sent_packets = true; } else { panic!("Polled after finish"); } } + Ok(sent_packets) } + /// Sends a STREAM inside a Prepare packet with a ConnectionClose frame to the peer fn try_send_connection_close(&mut self) -> Result<(), Error> { let sequence = self.next_sequence(); let stream_packet = StreamPacketBuilder { @@ -236,15 +279,17 @@ where // Send it! 
        debug!("Closing connection");
-        if let Some(ref mut next) = self.next {
-            let send_request = next.handle_request(IncomingRequest {
-                from: self.from_account.clone(),
-                prepare,
-            });
+        if let Some(ref next) = self.next {
+            let mut next = next.clone();
+            let from = self.from_account.clone();
+            let request =
+                Box::pin(
+                    async move { next.handle_request(IncomingRequest { from, prepare }).await },
+                );
             self.pending_requests.get_mut().push(PendingRequest {
                 sequence,
                 amount: 0,
-                future: Box::new(send_request),
+                future: request,
             });
         } else {
             panic!("Polled after finish");
@@ -252,39 +297,57 @@ where
         Ok(())
     }
 
-    fn poll_pending_requests(&mut self) -> Poll<(), Error> {
+    fn poll_pending_requests(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
         let pending_requests = self.pending_requests.take();
         let pending_requests = pending_requests
             .into_iter()
-            .filter_map(|mut pending_request| match pending_request.future.poll() {
-                Ok(Async::NotReady) => Some(pending_request),
-                Ok(Async::Ready(fulfill)) => {
-                    self.handle_fulfill(pending_request.sequence, pending_request.amount, fulfill);
-                    None
-                }
-                Err(reject) => {
-                    self.handle_reject(pending_request.sequence, pending_request.amount, reject);
-                    None
-                }
-            })
+            .filter_map(
+                |mut pending_request| match pending_request.future.as_mut().poll(cx) {
+                    Poll::Pending => Some(pending_request),
+                    Poll::Ready(result) => {
+                        match result {
+                            Ok(fulfill) => {
+                                self.handle_fulfill(
+                                    pending_request.sequence,
+                                    pending_request.amount,
+                                    fulfill,
+                                );
+                            }
+                            Err(reject) => {
+                                self.handle_reject(
+                                    pending_request.sequence,
+                                    pending_request.amount,
+                                    reject,
+                                );
+                            }
+                        };
+                        None
+                    }
+                },
+            )
             .collect();
         self.pending_requests.set(pending_requests);
 
         if let Some(error) = self.error.take() {
             error!("Send money stopped because of error: {:?}", error);
-            Err(error)
+            Poll::Ready(Err(error))
         } else if self.pending_requests.get_mut().is_empty() {
-            Ok(Async::Ready(()))
+            Poll::Ready(Ok(()))
         } else {
-            Ok(Async::NotReady)
+            Poll::Pending
         }
     }
 
+    /// Parses the provided Fulfill packet.
+    /// 1. Logs the fulfill in the congestion controller
+    /// 1. Updates the last fulfill time of the send money future
+    /// 1. Tries to parse a Stream Packet inside the fulfill packet's data field.
+    ///    If successful, it increments the delivered amount by the Stream Packet's prepare amount
     fn handle_fulfill(&mut self, sequence: u64, amount: u64, fulfill: Fulfill) {
         // TODO should we check the fulfillment and expiry or can we assume the plugin does that?
         self.congestion_controller.fulfill(amount);
-        self.should_send_source_account = false;
-        self.last_fulfill_time = Instant::now();
+        *self.should_send_source_account = false;
+        *self.last_fulfill_time = Instant::now();
 
         if let Ok(packet) = StreamPacket::from_encrypted(&self.shared_secret, fulfill.into_data()) {
             if packet.ilp_packet_type() == IlpPacketType::Fulfill {
@@ -319,10 +382,19 @@ where
         );
     }
 
+    /// Parses the provided Reject packet.
+    /// 1. Increases the source amount that was deducted at the start of the [send_money](./fn.send_money.html) loop
+    /// 1. Logs the reject in the congestion controller
+    /// 1. Increments the rejected packets counter
+    /// 1. If the receipt's `delivered_asset` fields are not populated, it tries to parse
+    ///    a Stream Packet inside the reject packet's data field to check if
+    ///    there is a [`ConnectionAssetDetailsFrame`](./../packet/struct.ConnectionAssetDetailsFrame.html) frame.
+    ///    If one is found, it updates the receipt's `delivered_asset_scale` and `delivered_asset_code`
+    ///    accordingly.
fn handle_reject(&mut self, sequence: u64, amount: u64, reject: Reject) { - self.source_amount += amount; + *self.source_amount += amount; self.congestion_controller.reject(amount, &reject); - self.rejected_packets += 1; + *self.rejected_packets += 1; debug!( "Prepare {} with amount {} was rejected with code: {} ({} left to send)", sequence, @@ -363,7 +435,7 @@ where // TODO handle STREAM errors } _ => { - self.error = Some(Error::SendMoneyError(format!( + *self.error = Some(Error::SendMoneyError(format!( "Packet was rejected with error: {} {}", reject.code(), str::from_utf8(reject.message()).unwrap_or_default(), @@ -372,48 +444,47 @@ where } } + /// Increments the stream's sequence number and returns the updated value fn next_sequence(&mut self) -> u64 { - let seq = self.sequence; - self.sequence += 1; + let seq = *self.sequence; + *self.sequence += 1; seq } } impl Future for SendMoneyFuture where - S: IncomingService, - A: Account, + S: IncomingService + Send + Sync + Clone + 'static, + A: Account + Send + Sync + 'static, { - type Item = (StreamDelivery, S); - type Error = Error; + type Output = Result<(StreamDelivery, S), Error>; - fn poll(&mut self) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { // TODO maybe don't have loops here and in try_send_money + let mut this = self.project(); + loop { - self.poll_pending_requests()?; - if self.last_fulfill_time.elapsed() >= MAX_TIME_SINCE_LAST_FULFILL { - return Err(Error::TimeoutError(format!( + ready!(this.poll_pending_requests(cx)?); + if this.last_fulfill_time.elapsed() >= MAX_TIME_SINCE_LAST_FULFILL { + return Poll::Ready(Err(Error::TimeoutError(format!( "Time since last fulfill exceeded the maximum time limit of {:?} secs", - self.last_fulfill_time.elapsed().as_secs() - ))); + this.last_fulfill_time.elapsed().as_secs() + )))); } - if self.source_amount == 0 && self.pending_requests.get_mut().is_empty() { - if self.state == SendMoneyFutureState::SendMoney { - self.state = SendMoneyFutureState::Closing; - self.try_send_connection_close()?; + if *this.source_amount == 0 && this.pending_requests.get_mut().is_empty() { + if *this.state == SendMoneyFutureState::SendMoney { + *this.state = SendMoneyFutureState::Closing; + this.try_send_connection_close()?; } else { - self.state = SendMoneyFutureState::Closed; + *this.state = SendMoneyFutureState::Closed; debug!( - "Send money future finished. Delivered: {} ({} packets fulfilled, {} packets rejected)", self.receipt.delivered_amount, self.sequence - 1, self.rejected_packets, + "Send money future finished. Delivered: {} ({} packets fulfilled, {} packets rejected)", this.receipt.delivered_amount, *this.sequence - 1, this.rejected_packets, ); - return Ok(Async::Ready(( - self.receipt.clone(), - self.next.take().unwrap(), - ))); + return Poll::Ready(Ok((this.receipt.clone(), this.next.take().unwrap()))); } - } else if !self.try_send_money()? { - return Ok(Async::NotReady); + } else if !this.try_send_money()? 
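The `poll` rewrite above follows the std `Future` shape introduced with futures 0.3: `Async::Ready`/`Async::NotReady` become `Poll::Ready`/`Poll::Pending`, and `futures::ready!` takes the place of the old `try_ready!`. As a small self-contained illustration of that pattern (not part of this patch), a wrapper future that counts how many times an `Unpin` inner future is polled:

```rust
use futures::ready;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Illustrative wrapper: polls the inner future and records the poll count.
struct CountPolls<F> {
    inner: F,
    polls: u32,
}

impl<F: Future + Unpin> Future for CountPolls<F> {
    type Output = (F::Output, u32);

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.polls += 1;
        // `ready!` returns `Poll::Pending` early, mirroring the old `try_ready!`.
        let output = ready!(Pin::new(&mut self.inner).poll(cx));
        let polls = self.polls;
        Poll::Ready((output, polls))
    }
}
```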
{ + return Poll::Pending; } } } @@ -431,8 +502,8 @@ mod send_money_tests { use std::sync::Arc; use uuid::Uuid; - #[test] - fn stops_at_final_errors() { + #[tokio::test] + async fn stops_at_final_errors() { let account = TestAccount { id: Uuid::new_v4(), asset_code: "XYZ".to_string(), @@ -457,7 +528,7 @@ mod send_money_tests { &[0; 32][..], 100, ) - .wait(); + .await; assert!(result.is_err()); assert_eq!(requests.lock().len(), 1); } diff --git a/crates/interledger-stream/src/congestion.rs b/crates/interledger-stream/src/congestion.rs index 3138c55a6..2f9eb676d 100644 --- a/crates/interledger-stream/src/congestion.rs +++ b/crates/interledger-stream/src/congestion.rs @@ -17,11 +17,19 @@ use std::io; /// control algorithms. pub struct CongestionController { state: CongestionState, + /// Amount which is added to `max_in_flight` per fulfill increase_amount: u64, + /// Divide `max_in_flight` by this factor per reject with code for insufficient liquidity + /// or if there is no `max_packet_amount` specified decrease_factor: f64, + /// The maximum amount we are allowed to add in a packet. This gets automatically set if + /// we receive a reject packet with a `F08_AMOUNT_TOO_LARGE` error max_packet_amount: Option, + /// The current amount in flight amount_in_flight: u64, + /// The maximum allowed amount to be in flight max_in_flight: u64, + /// Writer object to write our metrics to a csv #[cfg(feature = "metrics_csv")] csv_writer: csv::Writer, } @@ -33,6 +41,7 @@ enum CongestionState { } impl CongestionController { + /// Constructs a new congestion controller pub fn new(start_amount: u64, increase_amount: u64, decrease_factor: f64) -> Self { #[cfg(feature = "metrics_csv")] let mut csv_writer = csv::Writer::from_writer(io::stdout()); @@ -53,6 +62,7 @@ impl CongestionController { } } + /// The maximum amount which can be sent is the maximum amount in flight minus the current amount in flight pub fn get_max_amount(&mut self) -> u64 { if self.amount_in_flight > self.max_in_flight { return 0; @@ -66,6 +76,7 @@ impl CongestionController { } } + /// Increments the amount in flight by the provided amount pub fn prepare(&mut self, amount: u64) { if amount > 0 { self.amount_in_flight += amount; @@ -76,6 +87,8 @@ impl CongestionController { } } + /// Decrements the amount in flight by the provided amount + /// Increases the allowed max in flight amount cap pub fn fulfill(&mut self, prepare_amount: u64) { self.amount_in_flight -= prepare_amount; @@ -111,6 +124,8 @@ impl CongestionController { self.log_stats(prepare_amount); } + /// Decrements the amount in flight by the provided amount + /// Decreases the allowed max in flight amount cap pub fn reject(&mut self, prepare_amount: u64, reject: &Reject) { self.amount_in_flight -= prepare_amount; diff --git a/crates/interledger-stream/src/crypto.rs b/crates/interledger-stream/src/crypto.rs index 03c9727aa..657c8142c 100644 --- a/crates/interledger-stream/src/crypto.rs +++ b/crates/interledger-stream/src/crypto.rs @@ -8,9 +8,12 @@ use ring::{aead, digest, hmac}; const NONCE_LENGTH: usize = 12; const AUTH_TAG_LENGTH: usize = 16; +/// Protocol specific string for encryption static ENCRYPTION_KEY_STRING: &[u8] = b"ilp_stream_encryption"; +/// Protocol specific string for generating fulfillments static FULFILLMENT_GENERATION_STRING: &[u8] = b"ilp_stream_fulfillment"; +/// Returns the HMAC-SHA256 of the provided message using the provided **secret** key pub fn hmac_sha256(key: &[u8], message: &[u8]) -> [u8; 32] { let key = hmac::Key::new(hmac::HMAC_SHA256, key); let 
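The congestion controller documented above follows an additive-increase/multiplicative-decrease scheme: each fulfill releases the in-flight amount and grows the window, while a liquidity-related reject shrinks it by `decrease_factor`. A rough walkthrough of that behaviour, using only the methods shown in this patch (the constants are arbitrary):

```rust
// Sketch of the documented behaviour, not a test from this patch.
fn congestion_walkthrough() {
    // 1000-unit starting window, +100 per fulfill, halve on liquidity-related rejects.
    let mut controller = CongestionController::new(1000, 100, 2.0);

    // Nothing is in flight yet, so the whole window is available.
    let amount = controller.get_max_amount();

    // Record the outgoing Prepare: the available window shrinks by `amount`.
    controller.prepare(amount);

    // On a Fulfill the in-flight amount is released and the window grows;
    // on a Reject it is released and the window shrinks instead.
    controller.fulfill(amount);
    let _next_amount = controller.get_max_amount();
}
```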
output = hmac::sign(&key, message);
@@ -19,11 +22,17 @@ pub fn hmac_sha256(key: &[u8], message: &[u8]) -> [u8; 32] {
     to_return
 }
 
+/// The fulfillment is generated by HMAC-256'ing the data with a secret key.
+/// The secret key is generated deterministically by HMAC-256'ing the shared secret
+/// and the hardcoded string "ilp_stream_fulfillment"
 pub fn generate_fulfillment(shared_secret: &[u8], data: &[u8]) -> [u8; 32] {
+    // generate the key as defined in the specification
    let key = hmac_sha256(&shared_secret[..], &FULFILLMENT_GENERATION_STRING);
+    // return the hmac-sha256 of the data based on the generated key
    hmac_sha256(&key[..], &data[..])
 }
 
+/// Returns a 32-byte SHA-256 digest of the provided preimage
 pub fn hash_sha256(preimage: &[u8]) -> [u8; 32] {
     let output = digest::digest(&digest::SHA256, &preimage[..]);
     let mut to_return: [u8; 32] = [0; 32];
@@ -31,11 +40,15 @@ pub fn hash_sha256(preimage: &[u8]) -> [u8; 32] {
     to_return
 }
 
+/// The fulfillment condition is the 32-byte SHA-256 hash of the fulfillment
+/// generated from the provided shared secret and data via the
+/// [generate_fulfillment](./fn.generate_fulfillment.html) function
 pub fn generate_condition(shared_secret: &[u8], data: &[u8]) -> [u8; 32] {
     let fulfillment = generate_fulfillment(&shared_secret, &data);
     hash_sha256(&fulfillment)
 }
 
+/// Returns a random 32-byte value using [SystemRandom::new()](../../ring/rand/struct.SystemRandom.html#method.new)
 pub fn random_condition() -> [u8; 32] {
     let mut condition_slice: [u8; 32] = [0; 32];
     SystemRandom::new()
@@ -44,6 +57,8 @@ pub fn random_condition() -> [u8; 32] {
     condition_slice
 }
 
+/// Returns a random 18-byte value using
+/// [SystemRandom::new()](../../ring/rand/struct.SystemRandom.html#method.new)
 pub fn generate_token() -> [u8; 18] {
     let mut token: [u8; 18] = [0; 18];
     SystemRandom::new()
@@ -52,6 +67,9 @@ pub fn generate_token() -> [u8; 18] {
     token
 }
 
+/// Encrypts a plaintext by calling [encrypt_with_nonce](./fn.encrypt_with_nonce.html)
+/// with a random nonce of [`NONCE_LENGTH`](./constant.NONCE_LENGTH.html) generated using
+/// [SystemRandom::new()](../../ring/rand/struct.SystemRandom.html#method.new)
 pub fn encrypt(shared_secret: &[u8], plaintext: BytesMut) -> BytesMut {
     // Generate a random nonce or IV
     let mut nonce: [u8; NONCE_LENGTH] = [0; NONCE_LENGTH];
@@ -62,6 +80,15 @@ pub fn encrypt(shared_secret: &[u8], plaintext: BytesMut) -> BytesMut {
     encrypt_with_nonce(shared_secret, plaintext, nonce)
 }
 
+/// Encrypts a plaintext with a nonce by using AES256-GCM.
+///
+/// A secret key is generated deterministically by HMAC-256'ing the `shared_secret`
+/// and the hardcoded string "ilp_stream_encryption"
+///
+/// The `additional_data` field is left empty.
+///
+/// The ciphertext can be decrypted by calling the [`decrypt`](./fn.decrypt.html) function with the
+/// same `shared_secret`.
 fn encrypt_with_nonce(
     shared_secret: &[u8],
     mut plaintext: BytesMut,
@@ -96,6 +123,15 @@ fn encrypt_with_nonce(
     nonce_tag_data
 }
 
+/// Decrypts an AES256-GCM encrypted ciphertext.
+///
+/// The secret key is generated deterministically by HMAC-256'ing the `shared_secret`
+/// and the hardcoded string "ilp_stream_encryption"
+///
+/// The `additional_data` field is left empty.
+///
+/// The nonce and auth tag are extracted from the first 12 and 16 bytes
+/// of the ciphertext.
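To make the relationship between these helpers concrete: the sender derives the Prepare's execution condition from the shared secret and the encrypted STREAM data, and the receiver later regenerates the matching fulfillment from the same inputs. A small sketch using only the functions documented above (dummy secret and data):

```rust
// Sketch only: the secret and data are dummy values.
fn condition_fulfillment_roundtrip() {
    let shared_secret = [7u8; 32];
    let data = b"encrypted stream packet bytes";

    // Sender: condition placed in the ILP Prepare.
    let condition = generate_condition(&shared_secret, data);

    // Receiver: regenerates the fulfillment from the same shared secret and data.
    let fulfillment = generate_fulfillment(&shared_secret, data);

    // The condition is just the SHA-256 hash of that fulfillment.
    assert_eq!(condition, hash_sha256(&fulfillment));
}
```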
pub fn decrypt(shared_secret: &[u8], mut ciphertext: BytesMut) -> Result { // ciphertext must include at least a nonce and tag, if ciphertext.len() < AUTH_TAG_LENGTH { diff --git a/crates/interledger-stream/src/error.rs b/crates/interledger-stream/src/error.rs index 60d92ba2d..8ef6ff586 100644 --- a/crates/interledger-stream/src/error.rs +++ b/crates/interledger-stream/src/error.rs @@ -1,5 +1,6 @@ use failure::Fail; +/// Stream Errors #[derive(Fail, Debug)] pub enum Error { #[fail(display = "Error connecting: {}", _0)] diff --git a/crates/interledger-stream/src/lib.rs b/crates/interledger-stream/src/lib.rs index 7e0c048ac..c0337705c 100644 --- a/crates/interledger-stream/src/lib.rs +++ b/crates/interledger-stream/src/lib.rs @@ -4,11 +4,17 @@ //! //! STREAM is responsible for splitting larger payments and messages into smaller chunks of money and data, and sending them over ILP. +/// Stream client mod client; +/// Congestion controller consumed by the [stream client](./client/fn.send_money.html) mod congestion; +/// Cryptographic utilities for generating fulfillments and encrypting/decrypting STREAM packets mod crypto; +/// Stream errors mod error; +/// Stream Packet implementation, [as specified in the RFC](https://interledger.org/rfcs/0029-stream/#5-packet-and-frame-specification) mod packet; +/// A stream server implementing an [Outgoing Service](../interledger_service/trait.OutgoingService.html) for receiving STREAM payments from peers mod server; pub use client::{send_money, StreamDelivery}; @@ -20,7 +26,8 @@ pub use server::{ #[cfg(test)] pub mod test_helpers { use super::*; - use futures::{future::ok, sync::mpsc::UnboundedSender, Future}; + use async_trait::async_trait; + use futures::channel::mpsc::UnboundedSender; use interledger_packet::Address; use interledger_router::RouterStore; use interledger_service::{Account, AccountStore, AddressStore, Username}; @@ -88,22 +95,17 @@ pub mod test_helpers { pub route: (String, TestAccount), } + #[async_trait] impl AccountStore for TestStore { type Account = TestAccount; - fn get_accounts( - &self, - _account_ids: Vec, - ) -> Box, Error = ()> + Send> { - Box::new(ok(vec![self.route.1.clone()])) + async fn get_accounts(&self, _account_ids: Vec) -> Result, ()> { + Ok(vec![self.route.1.clone()]) } // stub implementation (not used in these tests) - fn get_account_id_from_username( - &self, - _username: &Username, - ) -> Box + Send> { - Box::new(ok(Uuid::new_v4())) + async fn get_account_id_from_username(&self, _username: &Username) -> Result { + Ok(Uuid::new_v4()) } } @@ -115,16 +117,14 @@ pub mod test_helpers { } } + #[async_trait] impl AddressStore for TestStore { /// Saves the ILP Address in the store's memory and database - fn set_ilp_address( - &self, - _ilp_address: Address, - ) -> Box + Send> { + async fn set_ilp_address(&self, _ilp_address: Address) -> Result<(), ()> { unimplemented!() } - fn clear_ilp_address(&self) -> Box + Send> { + async fn clear_ilp_address(&self) -> Result<(), ()> { unimplemented!() } @@ -140,18 +140,16 @@ mod send_money_to_receiver { use super::test_helpers::*; use super::*; use bytes::Bytes; - use futures::Future; use interledger_ildcp::IldcpService; use interledger_packet::Address; use interledger_packet::{ErrorCode, RejectBuilder}; use interledger_router::Router; use interledger_service::outgoing_service_fn; use std::str::FromStr; - use tokio::runtime::Runtime; use uuid::Uuid; - #[test] - fn send_money_test() { + #[tokio::test] + async fn send_money_test() { let server_secret = Bytes::from(&[0; 32][..]); let 
destination_address = Address::from_str("example.receiver").unwrap(); let account = TestAccount { @@ -184,7 +182,7 @@ mod send_money_to_receiver { connection_generator.generate_address_and_secret(&destination_address); let destination_address = Address::from_str("example.receiver").unwrap(); - let run = send_money( + let (receipt, _service) = send_money( server, &test_helpers::TestAccount { id: Uuid::new_v4(), @@ -196,12 +194,9 @@ mod send_money_to_receiver { &shared_secret[..], 100, ) - .and_then(|(receipt, _service)| { - assert_eq!(receipt.delivered_amount, 100); - Ok(()) - }) - .map_err(|err| panic!(err)); - let runtime = Runtime::new().unwrap(); - runtime.block_on_all(run).unwrap(); + .await + .unwrap(); + + assert_eq!(receipt.delivered_amount, 100); } } diff --git a/crates/interledger-stream/src/packet.rs b/crates/interledger-stream/src/packet.rs index 59a78f5f7..cc77d0bb1 100644 --- a/crates/interledger-stream/src/packet.rs +++ b/crates/interledger-stream/src/packet.rs @@ -10,16 +10,25 @@ use lazy_static::lazy_static; use log::warn; use std::{convert::TryFrom, fmt, str, u64}; +/// The Stream Protocol's version const STREAM_VERSION: u8 = 1; +/// Builder for [Stream Packets](https://interledger.org/rfcs/0029-stream/#52-stream-packet) pub struct StreamPacketBuilder<'a> { + /// The stream packet's sequence number pub sequence: u64, + /// The [ILP Packet Type](../interledger_packet/enum.PacketType.html) pub ilp_packet_type: IlpPacketType, + /// Destination amount of the ILP Prepare, used for enforcing minimum exchange rates and congestion control. + /// Within an ILP Prepare, represents the minimum amount the recipient needs to receive in order to fulfill the packet. + /// Within an ILP Fulfill or ILP Reject, represents the amount received by the recipient. pub prepare_amount: u64, + /// The stream frames pub frames: &'a [Frame<'a>], } impl<'a> StreamPacketBuilder<'a> { + /// Serializes the builder into a Stream Packet pub fn build(&self) -> StreamPacket { // TODO predict length first let mut buffer_unencrypted = Vec::new(); @@ -96,7 +105,7 @@ impl<'a> StreamPacketBuilder<'a> { } StreamPacket { - buffer_unencrypted: BytesMut::from(buffer_unencrypted), + buffer_unencrypted: BytesMut::from(&buffer_unencrypted[..]), sequence: self.sequence, ilp_packet_type: self.ilp_packet_type, prepare_amount: self.prepare_amount, @@ -105,16 +114,31 @@ impl<'a> StreamPacketBuilder<'a> { } } +/// A Stream Packet as specified in its [ASN.1 definition](https://interledger.org/rfcs/asn1/Stream.asn) #[derive(PartialEq, Clone)] pub struct StreamPacket { + /// The cleartext serialized packet pub(crate) buffer_unencrypted: BytesMut, + /// The packet's sequence number sequence: u64, + /// The [ILP Packet Type](../interledger_packet/enum.PacketType.html) ilp_packet_type: IlpPacketType, + /// Destination amount of the ILP Prepare, used for enforcing minimum exchange rates and congestion control. + /// Within an ILP Prepare, represents the minimum amount the recipient needs to receive in order to fulfill the packet. + /// Within an ILP Fulfill or ILP Reject, represents the amount received by the recipient. prepare_amount: u64, + /// The offset after which frames can be found inside the `buffer_unencrypted` field frames_offset: usize, } impl StreamPacket { + /// Constructs a [Stream Packet](./struct.StreamPacket.html) from an encrypted buffer + /// and a shared secret + /// + /// # Errors + /// 1. If the version of Stream Protocol doesn't match the hardcoded [stream version](constant.STREAM_VERSION.html) + /// 1. 
If the decryption fails + /// 1. If the decrypted bytes cannot be parsed to an unencrypted [Stream Packet](./struct.StreamPacket.html) pub fn from_encrypted(shared_secret: &[u8], ciphertext: BytesMut) -> Result { // TODO handle decryption failure let decrypted = decrypt(shared_secret, ciphertext) @@ -122,6 +146,11 @@ impl StreamPacket { StreamPacket::from_bytes_unencrypted(decrypted) } + /// Constructs a [Stream Packet](./struct.StreamPacket.html) from a buffer + /// + /// # Errors + /// 1. If the version of Stream Protocol doesn't match the hardcoded [stream version](constant.STREAM_VERSION.html) + /// 1. If the decrypted bytes cannot be parsed to an unencrypted [Stream Packet](./struct.StreamPacket.html) fn from_bytes_unencrypted(buffer_unencrypted: BytesMut) -> Result { // TODO don't copy the whole packet again let mut reader = &buffer_unencrypted[..]; @@ -161,22 +190,28 @@ impl StreamPacket { } } + /// Consumes the packet and a shared secret and returns a serialized encrypted + /// Stream packet pub fn into_encrypted(self, shared_secret: &[u8]) -> BytesMut { encrypt(shared_secret, self.buffer_unencrypted) } + /// The packet's sequence number pub fn sequence(&self) -> u64 { self.sequence } + /// The packet's [type](../interledger_packet/enum.PacketType.html) pub fn ilp_packet_type(&self) -> IlpPacketType { self.ilp_packet_type } + /// Destination amount of the ILP Prepare, used for enforcing minimum exchange rates and congestion control. pub fn prepare_amount(&self) -> u64 { self.prepare_amount } + /// Returns a [FrameIterator](./struct.FrameIterator.html) over the packet's [frames](./enum.Frame.html) pub fn frames(&self) -> FrameIterator { FrameIterator { buffer: &self.buffer_unencrypted[self.frames_offset..], @@ -197,11 +232,14 @@ impl fmt::Debug for StreamPacket { } } +/// Iterator over a serialized Frame to support zero-copy deserialization pub struct FrameIterator<'a> { buffer: &'a [u8], } impl<'a> FrameIterator<'a> { + /// Reads a u8 from the iterator's buffer, and depending on the type it returns + /// a [`Frame`](./enum.Frame.html) fn try_read_next_frame(&mut self) -> Result, ParseError> { let frame_type = self.buffer.read_u8()?; let contents: &'a [u8] = self.buffer.read_var_octet_string()?; @@ -291,6 +329,7 @@ impl<'a> fmt::Debug for FrameIterator<'a> { } } +/// Enum around the different Stream Frame types #[derive(PartialEq, Clone)] pub enum Frame<'a> { ConnectionClose(ConnectionCloseFrame<'a>), @@ -332,6 +371,7 @@ impl<'a> fmt::Debug for Frame<'a> { } } +/// The Stream Frame types [as defined in the RFC](https://interledger.org/rfcs/0029-stream/#53-frames) #[derive(Debug, PartialEq, Clone)] #[repr(u8)] pub enum FrameType { @@ -351,6 +391,7 @@ pub enum FrameType { StreamDataBlocked = 0x16, Unknown, } + impl From for FrameType { fn from(num: u8) -> Self { match num { @@ -373,6 +414,7 @@ impl From for FrameType { } } +/// The STREAM Error Codes [as defined in the RFC](https://interledger.org/rfcs/0029-stream/#54-error-codes) #[derive(Debug, PartialEq, Clone)] #[repr(u8)] pub enum ErrorCode { @@ -404,15 +446,20 @@ impl From for ErrorCode { } } +/// Helper trait for having a common interface to read/write on Frames pub trait SerializableFrame<'a>: Sized { fn put_contents(&self, buf: &mut impl MutBufOerExt) -> (); fn read_contents(reader: &'a [u8]) -> Result; } +/// Frame after which a connection must be closed. +/// If implementations allow half-open connections, an endpoint may continue sending packets after receiving a ConnectionClose frame. 
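Putting the builder and parser together, a packet survives an encrypt/parse round trip as long as both sides hold the same shared secret. A condensed sketch based on the API shown in this patch, in the spirit of the serialization tests further down (the secret is a dummy value):

```rust
// Sketch only: dummy shared secret, single StreamMoney frame.
fn stream_packet_roundtrip() {
    let shared_secret = [3u8; 32];

    let packet = StreamPacketBuilder {
        sequence: 1,
        ilp_packet_type: IlpPacketType::Prepare,
        prepare_amount: 100,
        frames: &[Frame::StreamMoney(StreamMoneyFrame {
            stream_id: 1,
            shares: 1,
        })],
    }
    .build();

    // Encrypt with the shared secret, then parse it back out.
    let ciphertext = packet.clone().into_encrypted(&shared_secret);
    let parsed = StreamPacket::from_encrypted(&shared_secret, ciphertext).unwrap();

    assert_eq!(parsed.sequence(), 1);
    assert_eq!(parsed.prepare_amount(), 100);
}
```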
#[derive(Debug, PartialEq, Clone)] pub struct ConnectionCloseFrame<'a> { + /// Machine-readable [Error Code](./enum.ErrorCode.html) indicating why the connection was closed. pub code: ErrorCode, + /// Human-readable string intended to give more information helpful for debugging purposes. pub message: &'a str, } @@ -431,8 +478,10 @@ impl<'a> SerializableFrame<'a> for ConnectionCloseFrame<'a> { } } +/// Frame which contains the sender of the Stream payment #[derive(PartialEq, Clone)] pub struct ConnectionNewAddressFrame { + /// New ILP address of the endpoint that sent the frame. pub source_account: Address, } @@ -460,9 +509,13 @@ impl<'a> fmt::Debug for ConnectionNewAddressFrame { } } +/// The assets being transported in this Stream payment +/// Asset details exposed by this frame MUST NOT change during the lifetime of a Connection. #[derive(Debug, PartialEq, Clone)] pub struct ConnectionAssetDetailsFrame<'a> { + /// Asset code of endpoint that sent the frame. pub source_asset_code: &'a str, + /// Asset scale of endpoint that sent the frame. pub source_asset_scale: u8, } @@ -483,8 +536,10 @@ impl<'a> SerializableFrame<'a> for ConnectionAssetDetailsFrame<'a> { } } +/// Endpoints MUST NOT exceed the total number of bytes the other endpoint is willing to accept. #[derive(Debug, PartialEq, Clone)] pub struct ConnectionMaxDataFrame { + /// The total number of bytes the endpoint is willing to receive on this connection. pub max_offset: u64, } @@ -500,8 +555,10 @@ impl<'a> SerializableFrame<'a> for ConnectionMaxDataFrame { } } +/// Frame specifying the amount of data which is going to be sent #[derive(Debug, PartialEq, Clone)] pub struct ConnectionDataBlockedFrame { + /// The total number of bytes the endpoint wants to send. pub max_offset: u64, } @@ -517,8 +574,10 @@ impl<'a> SerializableFrame<'a> for ConnectionDataBlockedFrame { } } +/// Frame specifying the maximum stream ID the endpoint is willing to accept. #[derive(Debug, PartialEq, Clone)] pub struct ConnectionMaxStreamIdFrame { + /// The maximum stream ID the endpoint is willing to accept. pub max_stream_id: u64, } @@ -534,8 +593,10 @@ impl<'a> SerializableFrame<'a> for ConnectionMaxStreamIdFrame { } } +/// Frame specifying the maximum stream ID the endpoint wishes to open. #[derive(Debug, PartialEq, Clone)] pub struct ConnectionStreamIdBlockedFrame { + /// The maximum stream ID the endpoint wishes to open. pub max_stream_id: u64, } @@ -551,10 +612,16 @@ impl<'a> SerializableFrame<'a> for ConnectionStreamIdBlockedFrame { } } +/// Endpoints MUST close the stream after receiving this stream immediately. +/// If implementations allow half-open streams, an endpoint MAY continue sending +/// money or data for this stream after receiving a StreamClose frame. #[derive(Debug, PartialEq, Clone)] pub struct StreamCloseFrame<'a> { + /// Identifier of the stream this frame refers to. pub stream_id: u64, + /// Machine-readable [Error Code](./enum.ErrorCode.html) indicating why the connection was closed. pub code: ErrorCode, + /// Human-readable string intended to give more information helpful for debugging purposes. pub message: &'a str, } @@ -579,9 +646,25 @@ impl<'a> SerializableFrame<'a> for StreamCloseFrame<'a> { } } +/// Frame specifying the amount of money that should go to each stream +/// +/// The amount of money that should go to each stream is calculated by +/// dividing the number of shares for the given stream by the total number +/// of shares in all of the StreamMoney frames in the packet. 
+///
+/// For example, if an ILP Prepare packet has an amount of 100 and three
+/// StreamMoney frames with 5, 15, and 30 shares for streams 2, 4, and 6,
+/// respectively, that would indicate that stream 2 should get 10 units,
+/// stream 4 gets 30 units, and stream 6 gets 60 units.
+/// If the Prepare amount is not divisible by the total number of shares,
+/// stream amounts are rounded down.
+///
+/// The remainder is allocated to the lowest-numbered open stream that has not reached its maximum receive amount.
 #[derive(Debug, PartialEq, Clone)]
 pub struct StreamMoneyFrame {
+    /// Identifier of the stream this frame refers to.
     pub stream_id: u64,
+    /// Proportion of the ILP Prepare amount destined for the stream specified.
     pub shares: u64,
 }
 
@@ -599,10 +682,21 @@ impl<'a> SerializableFrame<'a> for StreamMoneyFrame {
     }
 }
 
+/// Specifies the maximum amount of money the endpoint is willing to receive on a stream
+///
+/// The amounts in this frame are denominated in the units of the
+/// endpoint sending the frame, so the other endpoint must use their
+/// calculated exchange rate to determine how much more they can send
+/// for this stream.
 #[derive(Debug, PartialEq, Clone)]
 pub struct StreamMaxMoneyFrame {
+    /// Identifier of the stream this frame refers to.
     pub stream_id: u64,
+    /// Total amount, denominated in the units of the endpoint
+    /// sending this frame, that the endpoint is willing to receive on this stream.
     pub receive_max: u64,
+    /// Total amount, denominated in the units of the endpoint
+    /// sending this frame, that the endpoint has received thus far.
     pub total_received: u64,
 }
 
@@ -626,10 +720,16 @@ impl<'a> SerializableFrame<'a> for StreamMaxMoneyFrame {
     }
 }
 
+/// Frame specifying the maximum amount of money the sending endpoint wants to send
 #[derive(Debug, PartialEq, Clone)]
 pub struct StreamMoneyBlockedFrame {
+    /// Identifier of the stream this frame refers to.
     pub stream_id: u64,
+    /// Total amount, denominated in the units of the endpoint
+    /// sending this frame, that the endpoint wants to send.
     pub send_max: u64,
+    /// Total amount, denominated in the units of the endpoint
+    /// sending this frame, that the endpoint has sent already.
     pub total_sent: u64,
 }
 
@@ -653,10 +753,29 @@ impl<'a> SerializableFrame<'a> for StreamMoneyBlockedFrame {
     }
 }
 
+/// Packets may be received out of order, so the Offset is used to
+/// indicate the correct position of the byte segment in the overall stream.
+/// The first StreamData frame sent for a given stream MUST start with an Offset of zero.
+///
+/// Fragments of data provided by a stream's StreamData frames
+/// MUST NOT ever overlap with one another. For example, the following combination
+/// of frames is forbidden because bytes 15-19 were provided twice:
+///
+/// ```ignore
+/// StreamData { StreamID: 1, Offset: 10, Data: "1234567890" }
+/// StreamData { StreamID: 1, Offset: 15, Data: "67890" }
+/// ```
+///
+/// In other words, if a sender resends data (e.g. because a packet was lost),
+/// it MUST resend the exact same frames (same offset and data).
+/// This rule exists to simplify data reassembly for the receiver.
 #[derive(Debug, PartialEq, Clone)]
 pub struct StreamDataFrame<'a> {
+    /// Identifier of the stream this frame refers to.
     pub stream_id: u64,
+    /// Position of this data in the byte stream.
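The StreamMoney share arithmetic described above is plain integer division, which is easy to sanity-check; a tiny illustration of the 100-unit example from that comment:

```rust
// Sketch only: reproduces the 100-unit example from the StreamMoney documentation.
fn shares_example() {
    let prepare_amount: u64 = 100;
    let shares = [(2u64, 5u64), (4, 15), (6, 30)]; // (stream_id, shares)
    let total_shares: u64 = shares.iter().map(|(_, s)| s).sum();

    for (stream_id, stream_shares) in shares.iter() {
        // Rounded down, exactly as the frame documentation describes.
        let amount = prepare_amount * stream_shares / total_shares;
        println!("stream {} receives {}", stream_id, amount);
    }
    // -> stream 2 receives 10, stream 4 receives 30, stream 6 receives 60
}
```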
pub offset: u64, + /// Application data pub data: &'a [u8], } @@ -680,9 +799,12 @@ impl<'a> SerializableFrame<'a> for StreamDataFrame<'a> { } } +/// The maximum amount of data the endpoint is willing to receive on this stream #[derive(Debug, PartialEq, Clone)] pub struct StreamMaxDataFrame { + /// Identifier of the stream this frame refers to. pub stream_id: u64, + /// The total number of bytes the endpoint is willing to receive on this stream. pub max_offset: u64, } @@ -703,9 +825,12 @@ impl<'a> SerializableFrame<'a> for StreamMaxDataFrame { } } +/// The maximum amount of data the endpoint is willing to send on this stream #[derive(Debug, PartialEq, Clone)] pub struct StreamDataBlockedFrame { + /// Identifier of the stream this frame refers to. pub stream_id: u64, + /// The total number of bytes the endpoint wants to send on this stream. pub max_offset: u64, } @@ -801,14 +926,16 @@ mod serialization { ] } .build(); - static ref SERIALIZED: BytesMut = BytesMut::from(vec![ - 1, 12, 1, 1, 1, 99, 1, 14, 1, 5, 1, 3, 111, 111, 112, 2, 13, 12, 101, 120, 97, 109, - 112, 108, 101, 46, 98, 108, 97, 104, 3, 3, 2, 3, 232, 4, 3, 2, 7, 208, 5, 3, 2, 11, - 184, 6, 3, 2, 15, 160, 7, 5, 3, 88, 89, 90, 9, 16, 8, 1, 76, 2, 4, 98, 108, 97, 104, - 17, 4, 1, 88, 1, 99, 18, 8, 1, 11, 2, 3, 219, 2, 1, 244, 19, 8, 1, 66, 2, 78, 32, 2, - 23, 112, 20, 11, 1, 34, 2, 35, 40, 5, 104, 101, 108, 108, 111, 21, 5, 1, 35, 2, 34, 62, - 22, 6, 2, 3, 120, 2, 173, 156 - ]); + static ref SERIALIZED: BytesMut = BytesMut::from( + &vec![ + 1, 12, 1, 1, 1, 99, 1, 14, 1, 5, 1, 3, 111, 111, 112, 2, 13, 12, 101, 120, 97, 109, + 112, 108, 101, 46, 98, 108, 97, 104, 3, 3, 2, 3, 232, 4, 3, 2, 7, 208, 5, 3, 2, 11, + 184, 6, 3, 2, 15, 160, 7, 5, 3, 88, 89, 90, 9, 16, 8, 1, 76, 2, 4, 98, 108, 97, + 104, 17, 4, 1, 88, 1, 99, 18, 8, 1, 11, 2, 3, 219, 2, 1, 244, 19, 8, 1, 66, 2, 78, + 32, 2, 23, 112, 20, 11, 1, 34, 2, 35, 40, 5, 104, 101, 108, 108, 111, 21, 5, 1, 35, + 2, 34, 62, 22, 6, 2, 3, 120, 2, 173, 156 + ][..] 
+ ); } #[test] diff --git a/crates/interledger-stream/src/server.rs b/crates/interledger-stream/src/server.rs index 890489969..0ba842bee 100644 --- a/crates/interledger-stream/src/server.rs +++ b/crates/interledger-stream/src/server.rs @@ -1,15 +1,16 @@ use super::crypto::*; use super::packet::*; +use async_trait::async_trait; use base64; use bytes::{Bytes, BytesMut}; use chrono::{DateTime, Utc}; -use futures::{future::result, sync::mpsc::UnboundedSender}; +use futures::channel::mpsc::UnboundedSender; use hex; use interledger_packet::{ Address, ErrorCode, Fulfill, FulfillBuilder, PacketType as IlpPacketType, Prepare, Reject, RejectBuilder, }; -use interledger_service::{Account, BoxedIlpFuture, OutgoingRequest, OutgoingService, Username}; +use interledger_service::{Account, IlpResult, OutgoingRequest, OutgoingService, Username}; use log::debug; use serde::{Deserialize, Serialize}; use std::marker::PhantomData; @@ -58,7 +59,7 @@ impl ConnectionGenerator { // is valid and adding base64-url characters will always be valid let destination_account = base_address.with_suffix(&token.as_ref()).unwrap(); - debug!("Generated address: {}", destination_account,); + debug!("Generated address: {}", destination_account); (destination_account, shared_secret) } @@ -84,12 +85,18 @@ impl ConnectionGenerator { } } +/// Notification that STREAM fulfilled a packet and received a single Interledger payment, used by Pubsub API consumers #[derive(Debug, Deserialize, Serialize)] pub struct PaymentNotification { + /// The username of the account that received the Interledger payment pub to_username: Username, + /// The username of the account that routed the Interledger payment to this node pub from_username: Username, + /// The ILP Address of the receiver of the payment notification pub destination: Address, + /// The amount received pub amount: u64, + /// The time this payment notification was fired in RFC3339 format pub timestamp: String, } @@ -97,12 +104,15 @@ pub struct PaymentNotification { pub trait StreamNotificationsStore { type Account: Account; + /// *Synchronously* saves the sending side of the provided account id's websocket channel to the store's memory fn add_payment_notification_subscription( &self, account_id: Uuid, sender: UnboundedSender, ); + /// Instructs the store to publish the provided payment notification object + /// via its Pubsub interface fn publish_payment_notification(&self, _payment: PaymentNotification); } @@ -137,17 +147,16 @@ where } } +#[async_trait] impl OutgoingService for StreamReceiverService where S: StreamNotificationsStore + Send + Sync + 'static + Clone, - O: OutgoingService, - A: Account, + O: OutgoingService + Send + Sync + Clone, + A: Account + Send + Sync + Clone, { - type Future = BoxedIlpFuture; - /// Try fulfilling the request if it is for this STREAM server or pass it to the next /// outgoing handler if not. - fn send_request(&mut self, request: OutgoingRequest) -> Self::Future { + async fn send_request(&mut self, request: OutgoingRequest) -> IlpResult { let to_username = request.to.username().clone(); let from_username = request.from.username().clone(); let amount = request.prepare.amount(); @@ -182,14 +191,14 @@ where // the sender will likely see an error like F02: Unavailable (this is // a bit confusing but the packet data should not be modified at all // under normal circumstances). 
- return Box::new(self.next.send_request(request)); + return self.next.send_request(request).await; } } }; - return Box::new(result(response)); + return response; } } - Box::new(self.next.send_request(request)) + self.next.send_request(request).await } } @@ -367,7 +376,7 @@ mod receiving_money { fn fulfills_valid_packet() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); let stream_packet = test_stream_packet(); @@ -395,7 +404,7 @@ mod receiving_money { fn fulfills_valid_packet_without_connection_tag() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); let stream_packet = test_stream_packet(); @@ -423,7 +432,7 @@ mod receiving_money { fn rejects_modified_data() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); let stream_packet = test_stream_packet(); @@ -452,7 +461,7 @@ mod receiving_money { fn rejects_too_little_money() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); - let connection_generator = ConnectionGenerator::new(server_secret.clone()); + let connection_generator = ConnectionGenerator::new(server_secret); let (destination_account, shared_secret) = connection_generator.generate_address_and_secret(&ilp_address); @@ -491,7 +500,7 @@ mod receiving_money { fn fulfills_packets_sent_to_javascript_receiver() { // This was created by the JS ilp-protocol-stream library let ilp_address = Address::from_str("test.peerB").unwrap(); - let prepare = Prepare::try_from(bytes::BytesMut::from(hex::decode("0c819900000000000001f43230313931303238323134313533383338f31a96346c613011947f39a0f1f4e573c2fc3e7e53797672b01d2898e90c9a0723746573742e70656572422e4e6a584430754a504275477a353653426d4933755836682d3b6cc484c0d4e9282275d4b37c6ae18f35b497ddbfcbce6d9305b9451b4395c3158aa75e05bf27582a237109ec6ca0129d840da7abd96826c8147d0d").unwrap())).unwrap(); + let prepare = Prepare::try_from(bytes::BytesMut::from(&hex::decode("0c819900000000000001f43230313931303238323134313533383338f31a96346c613011947f39a0f1f4e573c2fc3e7e53797672b01d2898e90c9a0723746573742e70656572422e4e6a584430754a504275477a353653426d4933755836682d3b6cc484c0d4e9282275d4b37c6ae18f35b497ddbfcbce6d9305b9451b4395c3158aa75e05bf27582a237109ec6ca0129d840da7abd96826c8147d0d").unwrap()[..])).unwrap(); let condition = prepare.execution_condition().to_vec(); let server_secret = Bytes::from(vec![0u8; 32]); let connection_generator = ConnectionGenerator::new(server_secret); @@ -519,15 +528,15 @@ mod receiving_money { mod stream_receiver_service { use super::*; use crate::test_helpers::*; - use futures::Future; use interledger_packet::PrepareBuilder; 
use interledger_service::outgoing_service_fn; use std::convert::TryFrom; use std::str::FromStr; use std::time::UNIX_EPOCH; - #[test] - fn fulfills_correct_packets() { + + #[tokio::test] + async fn fulfills_correct_packets() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); let connection_generator = ConnectionGenerator::new(server_secret.clone()); @@ -550,7 +559,7 @@ mod stream_receiver_service { let mut service = StreamReceiverService::new( server_secret.clone(), DummyStore, - outgoing_service_fn(|_: OutgoingRequest| -> BoxedIlpFuture { + outgoing_service_fn(|_: OutgoingRequest| -> IlpResult { panic!("shouldn't get here") }), ); @@ -572,12 +581,12 @@ mod stream_receiver_service { original_amount: prepare.amount(), prepare, }) - .wait(); + .await; assert!(result.is_ok()); } - #[test] - fn rejects_invalid_packets() { + #[tokio::test] + async fn rejects_invalid_packets() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); let connection_generator = ConnectionGenerator::new(server_secret.clone()); @@ -632,12 +641,12 @@ mod stream_receiver_service { original_amount: prepare.amount(), prepare, }) - .wait(); + .await; assert!(result.is_err()); } - #[test] - fn passes_on_packets_not_for_it() { + #[tokio::test] + async fn passes_on_packets_not_for_it() { let ilp_address = Address::from_str("example.destination").unwrap(); let server_secret = Bytes::from(&[1; 32][..]); let connection_generator = ConnectionGenerator::new(server_secret.clone()); @@ -690,7 +699,7 @@ mod stream_receiver_service { }, prepare, }) - .wait(); + .await; assert!(result.is_err()); assert_eq!( result.unwrap_err().triggered_by().unwrap(),