diff --git a/core/src/connection/pool.rs b/core/src/connection/pool.rs index fcfee926388c..e6441bebb5de 100644 --- a/core/src/connection/pool.rs +++ b/core/src/connection/pool.rs @@ -44,7 +44,7 @@ use either::Either; use fnv::FnvHashMap; use futures::prelude::*; use smallvec::SmallVec; -use std::{error, fmt, hash::Hash, task::Context, task::Poll}; +use std::{convert::TryFrom as _, error, fmt, hash::Hash, num::NonZeroU32, task::Context, task::Poll}; /// A connection `Pool` manages a set of connections for each peer. pub struct Pool { @@ -86,7 +86,7 @@ pub enum PoolEvent<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TC /// A new connection has been established. ConnectionEstablished { connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, - num_established: usize, + num_established: NonZeroU32, }, /// An established connection has encountered an error. @@ -99,7 +99,7 @@ pub enum PoolEvent<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TC /// A reference to the pool that used to manage the connection. pool: &'a mut Pool, /// The remaining number of established connections to the same peer. - num_established: usize, + num_established: u32, }, /// A connection attempt failed. @@ -580,7 +580,7 @@ where let num_established = if let Some(conns) = self.established.get_mut(connected.peer_id()) { conns.remove(&id); - conns.len() + u32::try_from(conns.len()).unwrap() } else { 0 }; @@ -600,7 +600,7 @@ where .map_or(0, |conns| conns.len()); if let Err(e) = self.limits.check_established(current) { let connected = entry.close(); - let num_established = e.current; + let num_established = u32::try_from(e.current).unwrap(); return Poll::Ready(PoolEvent::ConnectionError { id, connected, @@ -623,7 +623,8 @@ where // Add the connection to the pool. let peer = entry.connected().peer_id().clone(); let conns = self.established.entry(peer).or_default(); - let num_established = conns.len() + 1; + let num_established = NonZeroU32::new(u32::try_from(conns.len() + 1).unwrap()) + .expect("n + 1 is always non-zero; qed"); conns.insert(id, endpoint); match self.get(id) { Some(PoolConnection::Established(connection)) => diff --git a/core/src/network.rs b/core/src/network.rs index 94b7df2d326a..52d0da801879 100644 --- a/core/src/network.rs +++ b/core/src/network.rs @@ -52,6 +52,7 @@ use fnv::{FnvHashMap}; use futures::{prelude::*, future}; use std::{ collections::hash_map, + convert::TryFrom as _, error, fmt, hash::Hash, @@ -517,7 +518,7 @@ where // A pending outgoing connection to a known peer failed. let mut attempt = dialing.remove(&peer_id).expect("by (1)"); - let num_remain = attempt.next.len(); + let num_remain = u32::try_from(attempt.next.len()).unwrap(); let failed_addr = attempt.current.clone(); let opts = diff --git a/core/src/network/event.rs b/core/src/network/event.rs index 835b6e643cdb..233b35fd9d34 100644 --- a/core/src/network/event.rs +++ b/core/src/network/event.rs @@ -42,7 +42,7 @@ use crate::{ transport::{Transport, TransportError}, }; use futures::prelude::*; -use std::{error, fmt, hash::Hash}; +use std::{error, fmt, hash::Hash, num::NonZeroU32}; /// Event that can happen on the `Network`. pub enum NetworkEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> @@ -88,7 +88,7 @@ where /// A new connection arrived on a listener. IncomingConnection(IncomingConnectionEvent<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>), - /// A new connection was arriving on a listener, but an error happened when negotiating it. 
+ /// An error happened on a connection during its initial handshake. /// /// This can include, for example, an error during the handshake of the encryption layer, or /// the connection unexpectedly closed. @@ -105,8 +105,9 @@ where ConnectionEstablished { /// The newly established connection. connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, - /// The total number of established connections to the same peer. - num_established: usize, + /// The total number of established connections to the same peer, including the one that + /// has just been opened. + num_established: NonZeroU32, }, /// An established connection to a peer has encountered an error. @@ -118,13 +119,13 @@ where /// The error that occurred. error: ConnectionError<::Error>, /// The remaining number of established connections to the same peer. - num_established: usize, + num_established: u32, }, /// A dialing attempt to an address of a peer failed. DialError { /// The number of remaining dialing attempts. - attempts_remaining: usize, + attempts_remaining: u32, /// Id of the peer we were trying to dial. peer_id: TPeerId, diff --git a/core/tests/network_dial_error.rs b/core/tests/network_dial_error.rs index e4603f34e25c..c01cebddfa2a 100644 --- a/core/tests/network_dial_error.rs +++ b/core/tests/network_dial_error.rs @@ -258,7 +258,7 @@ fn multiple_addresses_err() { assert_eq!(attempts_remaining, 0); return Poll::Ready(Ok(())); } else { - assert_eq!(attempts_remaining, addresses.len()); + assert_eq!(attempts_remaining, addresses.len() as u32); } }, Poll::Ready(_) => unreachable!(), diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index ddacec46852e..29bccee8239a 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -13,6 +13,7 @@ categories = ["network-programming", "asynchronous"] futures = "0.3.1" libp2p-core = { version = "0.16.0", path = "../core" } log = "0.4" +rand = "0.7" smallvec = "1.0" wasm-timer = "0.2" void = "1" diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 6e92aab793ba..b9a2e6dc94ab 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -79,24 +79,30 @@ pub use protocols_handler::{ SubstreamProtocol }; -use protocols_handler::NodeHandlerWrapperBuilder; +use protocols_handler::{ + NodeHandlerWrapperBuilder, + NodeHandlerWrapperError, +}; use futures::{ prelude::*, executor::{ThreadPool, ThreadPoolBuilder}, stream::FusedStream, }; use libp2p_core::{ + ConnectedPoint, Executor, Transport, Multiaddr, Negotiated, PeerId, connection::{ + ConnectionError, ConnectionId, ConnectionInfo, EstablishedConnection, IntoConnectionHandler, ListenerId, + PendingConnectionError, Substream }, transport::{TransportError, boxed::Boxed as BoxTransport}, @@ -116,6 +122,7 @@ use registry::{Addresses, AddressIntoIter}; use smallvec::SmallVec; use std::{error, fmt, hash::Hash, io, ops::{Deref, DerefMut}, pin::Pin, task::{Context, Poll}}; use std::collections::HashSet; +use std::num::NonZeroU32; use upgrade::UpgradeInfoSend as _; /// Contains the state of the network, plus the way it should behave. @@ -135,29 +142,110 @@ pub type NegotiatedSubstream = Negotiated>; /// Event generated by the `Swarm`. #[derive(Debug)] -pub enum SwarmEvent { +pub enum SwarmEvent { /// Event generated by the `NetworkBehaviour`. Behaviour(TBvEv), - /// We are now connected to the given peer. - Connected(PeerId), - /// We are now disconnected from the given peer. - Disconnected(PeerId), - /// One of our listeners has reported a new local listening address. 
- NewListenAddr(Multiaddr), - /// One of our listeners has reported the expiration of a listening address. - ExpiredListenAddr(Multiaddr), + /// A connection to the given peer has been opened. + ConnectionEstablished { + /// Identity of the peer that we have connected to. + peer_id: PeerId, + /// Endpoint of the connection that has been opened. + endpoint: ConnectedPoint, + /// Number of established connections to this peer, including the one that has just been + /// opened. + num_established: NonZeroU32, + }, + /// A connection with the given peer has been closed. + ConnectionClosed { + /// Identity of the peer that we have connected to. + peer_id: PeerId, + /// Endpoint of the connection that has been closed. + endpoint: ConnectedPoint, + /// Number of other remaining connections to this same peer. + num_established: u32, + /// Reason for the disconnection. + cause: ConnectionError>, + }, + /// A new connection arrived on a listener and is in the process of protocol negotiation. + /// + /// A corresponding [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished), + /// [`BannedPeer`](SwarmEvent::BannedPeer), or + /// [`IncomingConnectionError`](SwarmEvent::IncomingConnectionError) event will later be + /// generated for this connection. + IncomingConnection { + /// Local connection address. + /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) + /// event. + local_addr: Multiaddr, + /// Address used to send back data to the remote. + send_back_addr: Multiaddr, + }, + /// An error happened on a connection during its initial handshake. + /// + /// This can include, for example, an error during the handshake of the encryption layer, or + /// the connection unexpectedly closed. + IncomingConnectionError { + /// Local connection address. + /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) + /// event. + local_addr: Multiaddr, + /// Address used to send back data to the remote. + send_back_addr: Multiaddr, + /// The error that happened. + error: PendingConnectionError, + }, + /// We connected to a peer, but we immediately closed the connection because that peer is banned. + BannedPeer { + /// Identity of the banned peer. + peer_id: PeerId, + /// Endpoint of the connection that has been closed. + endpoint: ConnectedPoint, + }, + /// Starting to try to reach the given peer. + /// + /// We are trying to connect to this peer until a [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) + /// event is reported, or a [`UnreachableAddr`](SwarmEvent::UnreachableAddr) event is reported + /// with `attempts_remaining` equal to 0. + Dialing(PeerId), /// Tried to dial an address but it ended up being unreachaable. UnreachableAddr { - /// `PeerId` that we were trying to reach. `None` if we don't know in advance which peer - /// we were trying to reach. - peer_id: Option, + /// `PeerId` that we were trying to reach. + peer_id: PeerId, + /// Address that we failed to reach. + address: Multiaddr, + /// Error that has been encountered. + error: PendingConnectionError, + /// Number of remaining connection attempts that are being tried for this peer. + attempts_remaining: u32, + }, + /// Tried to dial an address but it ended up being unreachaable. + /// Contrary to `UnreachableAddr`, we don't know the identity of the peer that we were trying + /// to reach. + UnknownPeerUnreachableAddr { /// Address that we failed to reach. address: Multiaddr, /// Error that has been encountered. 
- error: Box, + error: PendingConnectionError, + }, + /// One of our listeners has reported a new local listening address. + NewListenAddr(Multiaddr), + /// One of our listeners has reported the expiration of a listening address. + ExpiredListenAddr(Multiaddr), + /// One of the listeners gracefully closed. + ListenerClosed { + /// The addresses that the listener was listening on. These addresses are now considered + /// expired, similar to if a [`ExpiredListenAddr`](SwarmEvent::ExpiredListenAddr) event + /// has been generated for each of them. + addresses: Vec, + /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` + /// if the stream produced an error. + reason: Result<(), io::Error>, + }, + /// One of the listeners reported a non-fatal error. + ListenerError { + /// The listener error. + error: io::Error, }, - /// Startng to try to reach the given peer. - StartConnect(PeerId), } /// Contains the state of the network, plus the way it should behave. @@ -230,14 +318,15 @@ where { } -impl +impl ExpandedSwarm where TBehaviour: NetworkBehaviour, TInEvent: Clone + Send + 'static, TOutEvent: Send + 'static, TConnInfo: ConnectionInfo + fmt::Debug + Clone + Send + 'static, THandler: IntoProtocolsHandler + Send + 'static, - THandler::Handler: ProtocolsHandler, + THandler::Handler: ProtocolsHandler, + THandleErr: error::Error + Send + 'static, { /// Builds a new `Swarm`. pub fn new(transport: TTransport, behaviour: TBehaviour, local_peer_id: PeerId) -> Self @@ -360,7 +449,7 @@ where TBehaviour: NetworkBehaviour, /// Returns the next event that happens in the `Swarm`. /// /// Includes events from the `NetworkBehaviour` but also events about the connections status. - pub async fn next_event(&mut self) -> SwarmEvent { + pub async fn next_event(&mut self) -> SwarmEvent { future::poll_fn(move |cx| ExpandedSwarm::poll_next_event(Pin::new(self), cx)).await } @@ -380,7 +469,7 @@ where TBehaviour: NetworkBehaviour, /// /// Polls the `Swarm` for the next event. fn poll_next_event(mut self: Pin<&mut Self>, cx: &mut Context) - -> Poll> + -> Poll> { // We use a `this` variable because the compiler can't mutably borrow multiple times // across a `Deref`. @@ -398,38 +487,62 @@ where TBehaviour: NetworkBehaviour, this.behaviour.inject_event(peer, connection, event); }, Poll::Ready(NetworkEvent::ConnectionEstablished { connection, num_established }) => { - let peer = connection.peer_id().clone(); - if this.banned_peers.contains(&peer) { - this.network.peer(peer) + let peer_id = connection.peer_id().clone(); + let endpoint = connection.endpoint().clone(); + if this.banned_peers.contains(&peer_id) { + this.network.peer(peer_id.clone()) .into_connected() .expect("the Network just notified us that we were connected; QED") .disconnect(); - } else if num_established == 1 { - let endpoint = connection.endpoint().clone(); - this.behaviour.inject_connected(peer.clone(), endpoint); - return Poll::Ready(SwarmEvent::Connected(peer)); + return Poll::Ready(SwarmEvent::BannedPeer { + peer_id, + endpoint, + }); + } else if num_established.get() == 1 { + this.behaviour.inject_connected(peer_id.clone(), endpoint.clone()); + return Poll::Ready(SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + }); } else { // For now, secondary connections are not explicitly reported to // the behaviour. A behaviour only gets awareness of the // connections via the events emitted from the connection handlers. 
log::trace!("Secondary connection established: {:?}; Total (peer): {}.", connection.connected(), num_established); + return Poll::Ready(SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + }); } }, Poll::Ready(NetworkEvent::ConnectionError { connected, error, num_established }) => { log::debug!("Connection {:?} closed by {:?}", connected, error); + let peer_id = connected.peer_id().clone(); + let endpoint = connected.endpoint; if num_established == 0 { - let peer = connected.peer_id().clone(); - let endpoint = connected.endpoint; - this.behaviour.inject_disconnected(&peer, endpoint); - return Poll::Ready(SwarmEvent::Disconnected(peer)); + this.behaviour.inject_disconnected(&peer_id, endpoint.clone()); } + return Poll::Ready(SwarmEvent::ConnectionClosed { + peer_id, + endpoint, + cause: error, + num_established, + }); }, Poll::Ready(NetworkEvent::IncomingConnection(incoming)) => { let handler = this.behaviour.new_handler(); + let local_addr = incoming.local_addr().clone(); + let send_back_addr = incoming.send_back_addr().clone(); if let Err(e) = incoming.accept(handler.into_node_handler_builder()) { log::warn!("Incoming connection rejected: {:?}", e); } + return Poll::Ready(SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + }); }, Poll::Ready(NetworkEvent::NewListenerAddress { listener_id, listen_addr }) => { log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr); @@ -451,11 +564,24 @@ where TBehaviour: NetworkBehaviour, this.behaviour.inject_expired_listen_addr(addr); } this.behaviour.inject_listener_closed(listener_id); + return Poll::Ready(SwarmEvent::ListenerClosed { + addresses, + reason, + }); } - Poll::Ready(NetworkEvent::ListenerError { listener_id, error }) => - this.behaviour.inject_listener_error(listener_id, &error), - Poll::Ready(NetworkEvent::IncomingConnectionError { error, .. }) => { + Poll::Ready(NetworkEvent::ListenerError { listener_id, error }) => { + this.behaviour.inject_listener_error(listener_id, &error); + return Poll::Ready(SwarmEvent::ListenerError { + error, + }); + }, + Poll::Ready(NetworkEvent::IncomingConnectionError { local_addr, send_back_addr, error }) => { log::debug!("Incoming connection failed: {:?}", error); + return Poll::Ready(SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + }); }, Poll::Ready(NetworkEvent::DialError { peer_id, multiaddr, error, attempts_remaining }) => { log::debug!( @@ -466,19 +592,19 @@ where TBehaviour: NetworkBehaviour, this.behaviour.inject_dial_failure(&peer_id); } return Poll::Ready(SwarmEvent::UnreachableAddr { - peer_id: Some(peer_id.clone()), + peer_id, address: multiaddr, - error: Box::new(error), + error, + attempts_remaining, }); }, Poll::Ready(NetworkEvent::UnknownPeerDialError { multiaddr, error, .. 
}) => { log::debug!("Connection attempt to address {:?} of unknown peer failed with {:?}", multiaddr, error); this.behaviour.inject_addr_reach_failure(None, &multiaddr, &error); - return Poll::Ready(SwarmEvent::UnreachableAddr { - peer_id: None, + return Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address: multiaddr, - error: Box::new(error), + error, }); }, } @@ -542,7 +668,7 @@ where TBehaviour: NetworkBehaviour, this.behaviour.inject_dial_failure(&peer_id); } else { ExpandedSwarm::dial(&mut *this, peer_id.clone()); - return Poll::Ready(SwarmEvent::StartConnect(peer_id)) + return Poll::Ready(SwarmEvent::Dialing(peer_id)) } }, Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { diff --git a/swarm/src/protocols_handler.rs b/swarm/src/protocols_handler.rs index 4be0d76c3474..160989180a92 100644 --- a/swarm/src/protocols_handler.rs +++ b/swarm/src/protocols_handler.rs @@ -43,6 +43,7 @@ mod map_out; mod node_handler; mod one_shot; mod select; +pub mod multi; pub use crate::upgrade::{ InboundUpgradeSend, diff --git a/swarm/src/protocols_handler/multi.rs b/swarm/src/protocols_handler/multi.rs new file mode 100644 index 000000000000..3ecaab9b0fd3 --- /dev/null +++ b/swarm/src/protocols_handler/multi.rs @@ -0,0 +1,376 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! A [`ProtocolsHandler`] implementation that combines multiple other `ProtocolsHandler`s +//! indexed by some key. + +use crate::NegotiatedSubstream; +use crate::protocols_handler::{ + KeepAlive, + IntoProtocolsHandler, + ProtocolsHandler, + ProtocolsHandlerEvent, + ProtocolsHandlerUpgrErr, + SubstreamProtocol +}; +use crate::upgrade::{ + InboundUpgradeSend, + OutboundUpgradeSend, + UpgradeInfoSend +}; +use futures::{future::BoxFuture, prelude::*}; +use libp2p_core::{ConnectedPoint, PeerId, upgrade::ProtocolName}; +use rand::Rng; +use std::{ + collections::{HashMap, HashSet}, + error, + fmt, + hash::Hash, + iter::{self, FromIterator}, + task::{Context, Poll} +}; + +/// A [`ProtocolsHandler`] for multiple other `ProtocolsHandler`s. 
+#[derive(Clone)] +pub struct MultiHandler { + handlers: HashMap +} + +impl fmt::Debug for MultiHandler +where + K: fmt::Debug + Eq + Hash, + H: fmt::Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("MultiHandler") + .field("handlers", &self.handlers) + .finish() + } +} + +impl MultiHandler +where + K: Hash + Eq, + H: ProtocolsHandler +{ + /// Create and populate a `MultiHandler` from the given handler iterator. + /// + /// It is an error for any two protocols handlers to share the same protocol name. + pub fn try_from_iter(iter: I) -> Result + where + I: IntoIterator + { + let m = MultiHandler { handlers: HashMap::from_iter(iter) }; + uniq_proto_names(m.handlers.values().map(|h| h.listen_protocol().into_upgrade().1))?; + Ok(m) + } +} + +impl ProtocolsHandler for MultiHandler +where + K: Clone + Hash + Eq + Send + 'static, + H: ProtocolsHandler, + H::InboundProtocol: InboundUpgradeSend, + H::OutboundProtocol: OutboundUpgradeSend +{ + type InEvent = (K, ::InEvent); + type OutEvent = (K, ::OutEvent); + type Error = ::Error; + type InboundProtocol = Upgrade::InboundProtocol>; + type OutboundProtocol = ::OutboundProtocol; + type OutboundOpenInfo = (K, ::OutboundOpenInfo); + + fn listen_protocol(&self) -> SubstreamProtocol { + let upgrades = self.handlers.iter() + .map(|(k, h)| (k.clone(), h.listen_protocol().into_upgrade().1)) + .collect(); + SubstreamProtocol::new(Upgrade { upgrades }) + } + + fn inject_fully_negotiated_outbound ( + &mut self, + protocol: ::Output, + (key, arg): Self::OutboundOpenInfo + ) { + if let Some(h) = self.handlers.get_mut(&key) { + h.inject_fully_negotiated_outbound(protocol, arg) + } else { + log::error!("inject_fully_negotiated_outbound: no handler for key") + } + } + + fn inject_fully_negotiated_inbound ( + &mut self, + (key, arg): ::Output + ) { + if let Some(h) = self.handlers.get_mut(&key) { + h.inject_fully_negotiated_inbound(arg) + } else { + log::error!("inject_fully_negotiated_inbound: no handler for key") + } + } + + fn inject_event(&mut self, (key, event): Self::InEvent) { + if let Some(h) = self.handlers.get_mut(&key) { + h.inject_event(event) + } else { + log::error!("inject_event: no handler for key") + } + } + + fn inject_dial_upgrade_error ( + &mut self, + (key, arg): Self::OutboundOpenInfo, + error: ProtocolsHandlerUpgrErr<::Error> + ) { + if let Some(h) = self.handlers.get_mut(&key) { + h.inject_dial_upgrade_error(arg, error) + } else { + log::error!("inject_dial_upgrade_error: no handler for protocol") + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + self.handlers.values() + .map(|h| h.connection_keep_alive()) + .max() + .unwrap_or(KeepAlive::No) + } + + fn poll(&mut self, cx: &mut Context) + -> Poll> + { + // Not always polling handlers in the same order should give anyone the chance to make progress. + let pos = rand::thread_rng().gen_range(0, self.handlers.len()); + + for (k, h) in self.handlers.iter_mut().skip(pos) { + if let Poll::Ready(e) = h.poll(cx) { + let e = e.map_outbound_open_info(|i| (k.clone(), i)).map_custom(|p| (k.clone(), p)); + return Poll::Ready(e) + } + } + + for (k, h) in self.handlers.iter_mut().take(pos) { + if let Poll::Ready(e) = h.poll(cx) { + let e = e.map_outbound_open_info(|i| (k.clone(), i)).map_custom(|p| (k.clone(), p)); + return Poll::Ready(e) + } + } + + Poll::Pending + } +} + +/// A [`IntoProtocolsHandler`] for multiple other `IntoProtocolsHandler`s. 
+#[derive(Clone)] +pub struct IntoMultiHandler { + handlers: HashMap +} + +impl fmt::Debug for IntoMultiHandler +where + K: fmt::Debug + Eq + Hash, + H: fmt::Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("IntoMultiHandler") + .field("handlers", &self.handlers) + .finish() + } +} + + +impl IntoMultiHandler +where + K: Hash + Eq, + H: IntoProtocolsHandler +{ + /// Create and populate an `IntoMultiHandler` from the given iterator. + /// + /// It is an error for any two protocols handlers to share the same protocol name. + pub fn try_from_iter(iter: I) -> Result + where + I: IntoIterator + { + let m = IntoMultiHandler { handlers: HashMap::from_iter(iter) }; + uniq_proto_names(m.handlers.values().map(|h| h.inbound_protocol()))?; + Ok(m) + } +} + +impl IntoProtocolsHandler for IntoMultiHandler +where + K: Clone + Eq + Hash + Send + 'static, + H: IntoProtocolsHandler +{ + type Handler = MultiHandler; + + fn into_handler(self, p: &PeerId, c: &ConnectedPoint) -> Self::Handler { + MultiHandler { + handlers: self.handlers.into_iter() + .map(|(k, h)| (k, h.into_handler(p, c))) + .collect() + } + } + + fn inbound_protocol(&self) -> ::InboundProtocol { + Upgrade { + upgrades: self.handlers.iter() + .map(|(k, h)| (k.clone(), h.inbound_protocol())) + .collect() + } + } +} + +/// Index and protocol name pair used as `UpgradeInfo::Info`. +#[derive(Debug, Clone)] +pub struct IndexedProtoName(usize, H); + +impl ProtocolName for IndexedProtoName { + fn protocol_name(&self) -> &[u8] { + self.1.protocol_name() + } +} + +/// Inbound and outbound upgrade for all `ProtocolsHandler`s. +#[derive(Clone)] +pub struct Upgrade { + upgrades: Vec<(K, H)> +} + +impl fmt::Debug for Upgrade +where + K: fmt::Debug + Eq + Hash, + H: fmt::Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Upgrade") + .field("upgrades", &self.upgrades) + .finish() + } +} + +impl UpgradeInfoSend for Upgrade +where + H: UpgradeInfoSend, + K: Send + 'static +{ + type Info = IndexedProtoName; + type InfoIter = std::vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.upgrades.iter().enumerate() + .map(|(i, (_, h))| iter::repeat(i).zip(h.protocol_info())) + .flatten() + .map(|(i, h)| IndexedProtoName(i, h)) + .collect::>() + .into_iter() + } +} + +impl InboundUpgradeSend for Upgrade +where + H: InboundUpgradeSend, + K: Send + 'static +{ + type Output = (K, ::Output); + type Error = (K, ::Error); + type Future = BoxFuture<'static, Result>; + + fn upgrade_inbound(mut self, resource: NegotiatedSubstream, info: Self::Info) -> Self::Future { + let IndexedProtoName(index, info) = info; + let (key, upgrade) = self.upgrades.remove(index); + upgrade.upgrade_inbound(resource, info) + .map(move |out| { + match out { + Ok(o) => Ok((key, o)), + Err(e) => Err((key, e)) + } + }) + .boxed() + } +} + +impl OutboundUpgradeSend for Upgrade +where + H: OutboundUpgradeSend, + K: Send + 'static +{ + type Output = (K, ::Output); + type Error = (K, ::Error); + type Future = BoxFuture<'static, Result>; + + fn upgrade_outbound(mut self, resource: NegotiatedSubstream, info: Self::Info) -> Self::Future { + let IndexedProtoName(index, info) = info; + let (key, upgrade) = self.upgrades.remove(index); + upgrade.upgrade_outbound(resource, info) + .map(move |out| { + match out { + Ok(o) => Ok((key, o)), + Err(e) => Err((key, e)) + } + }) + .boxed() + } +} + +/// Check that no two protocol names are equal. 
+fn uniq_proto_names<I, T>(iter: I) -> Result<(), DuplicateProtonameError>
+where
+    I: Iterator<Item = T>,
+    T: UpgradeInfoSend
+{
+    let mut set = HashSet::new();
+    for infos in iter {
+        for i in infos.protocol_info() {
+            let v = Vec::from(i.protocol_name());
+            if set.contains(&v) {
+                return Err(DuplicateProtonameError(v))
+            } else {
+                set.insert(v);
+            }
+        }
+    }
+    Ok(())
+}
+
+/// It is an error if two handlers share the same protocol name.
+#[derive(Debug, Clone)]
+pub struct DuplicateProtonameError(Vec<u8>);
+
+impl DuplicateProtonameError {
+    /// The protocol name bytes that occurred in more than one handler.
+    pub fn protocol_name(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl fmt::Display for DuplicateProtonameError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        if let Ok(s) = std::str::from_utf8(&self.0) {
+            write!(f, "duplicate protocol name: {}", s)
+        } else {
+            write!(f, "duplicate protocol name: {:?}", self.0)
+        }
+    }
+}
+
+impl error::Error for DuplicateProtonameError {}
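Usage note (not part of the patch): a minimal sketch of how the new `MultiHandler`/`IntoMultiHandler` API might be populated. `MyProtoHandler` is a hypothetical `IntoProtocolsHandler` implementation introduced here only for illustration; `try_from_iter` and `DuplicateProtonameError` are the items added above.

    // Two instances of one handler type, each under its own key and each
    // advertising a distinct protocol name. `MyProtoHandler::new` is assumed.
    let handler = IntoMultiHandler::try_from_iter(vec![
        ("kad-a".to_string(), MyProtoHandler::new(b"/kad/a/1.0.0")),
        ("kad-b".to_string(), MyProtoHandler::new(b"/kad/b/1.0.0")),
    ]);

    match handler {
        // The combined handler routes in- and outbound events as `(key, event)`
        // pairs to and from the sub-handler registered under `key`.
        Ok(h) => { /* hand `h` to the connection as the combined handler */ }
        // Construction fails if two sub-handlers advertise the same protocol name.
        Err(e) => eprintln!("{}", e),
    }

Because the sub-handlers live in a `HashMap<K, H>`, all of them must be of the same concrete handler type; distinct behaviours are distinguished by the key and by the protocol names they advertise.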
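Usage note (not part of the patch): a consumer-side sketch of matching on the richer `SwarmEvent` variants introduced in swarm/src/lib.rs. The `swarm` value and its behaviour are assumed to have been built elsewhere; only the variant names, fields, and their types come from this change.

    loop {
        match swarm.next_event().await {
            SwarmEvent::ConnectionEstablished { peer_id, num_established, .. } => {
                // `num_established` is a NonZeroU32 and includes the connection
                // that has just been opened.
                if num_established.get() == 1 {
                    println!("first connection to {} established", peer_id);
                }
            }
            SwarmEvent::ConnectionClosed { peer_id, num_established, .. } => {
                // Here `num_established` counts the *remaining* connections;
                // the `cause` field carries the ConnectionError.
                if num_established == 0 {
                    println!("fully disconnected from {}", peer_id);
                }
            }
            SwarmEvent::UnreachableAddr { peer_id, address, attempts_remaining, .. } => {
                println!("dialing {} via {} failed, {} attempts left",
                    peer_id, address, attempts_remaining);
            }
            SwarmEvent::Behaviour(ev) => { /* behaviour-specific event */ }
            _ => {}
        }
    }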