-
Notifications
You must be signed in to change notification settings - Fork 56
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
GossipSub: cancel inflight msgs when receiving duplicate #851
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -139,20 +139,17 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} = | |
|
||
libp2p_pubsub_peers.set(p.peers.len.int64) | ||
|
||
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [Defect].} = | ||
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg): Future[void] {.raises: [Defect].} = | ||
## Attempt to send `msg` to remote peer | ||
## | ||
|
||
trace "sending pubsub message to peer", peer, msg = shortLog(msg) | ||
peer.send(msg, p.anonymize) | ||
|
||
proc broadcast*( | ||
proc updateBroadcastMetrics( | ||
p: PubSub, | ||
sendPeers: auto, # Iterable[PubSubPeer] | ||
msg: RPCMsg) {.raises: [Defect].} = | ||
## Attempt to send `msg` to the given peers | ||
|
||
let npeers = sendPeers.len.int64 | ||
msg: RPCMsg, | ||
npeers: int64) = | ||
for sub in msg.subscriptions: | ||
if sub.subscribe: | ||
if p.knownTopics.contains(sub.topic): | ||
|
@@ -192,24 +189,50 @@ proc broadcast*( | |
else: | ||
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = ["generic"]) | ||
|
||
proc broadcast*( | ||
p: PubSub, | ||
sendPeers: auto, # Iterable[PubSubPeer] | ||
msg: RPCMsg) {.raises: [Defect].} = | ||
## Attempt to send `msg` to the given peers | ||
|
||
p.updateBroadcastMetrics(msg, sendPeers.len.int64) | ||
trace "broadcasting messages to peers", | ||
peers = sendPeers.len, msg = shortLog(msg) | ||
|
||
if anyIt(sendPeers, it.hasObservers): | ||
for peer in sendPeers: | ||
p.send(peer, msg) | ||
asyncSpawn p.send(peer, msg) | ||
else: | ||
# Fast path that only encodes message once | ||
let encoded = encodeRpcMsg(msg, p.anonymize) | ||
for peer in sendPeers: | ||
asyncSpawn peer.sendEncoded(encoded) | ||
|
||
proc cancellableBroadcast*( | ||
p: PubSub, | ||
sendPeers: auto, # Iterable[PubSubPeer] | ||
msg: RPCMsg): Table[PeerId, Future[void]] {.raises: [Defect].} = | ||
## Attempt to send `msg` to the given peers | ||
|
||
p.updateBroadcastMetrics(msg, sendPeers.len) | ||
trace "broadcasting messages to peers", | ||
peers = sendPeers.len, msg = shortLog(msg) | ||
|
||
if anyIt(sendPeers, it.hasObservers): | ||
for peer in sendPeers: | ||
result[peer.peerId] = p.send(peer, msg) | ||
else: | ||
# Fast path that only encodes message once | ||
let encoded = encodeRpcMsg(msg, p.anonymize) | ||
for peer in sendPeers: | ||
result[peer.peerId] = peer.sendEncoded(encoded) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. doesn't this already synchronously put bytes in the send buffer? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. With mplex, that will indeed go synchronously through every layer, down to chronos
So we could add cancellation support to linux at least With yamux, there is an intermediary queue before chronos (since there is per-stream backpressure), that does support cancellation: So this PR only makes sense if we get chronos support for write cancellation on linux, or enable yamux in nimbus :/ There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Getting TCP cancellation support in chronos seems less complex than expected (even on windows): status-im/nim-chronos#353 |
||
|
||
proc sendSubs*(p: PubSub, | ||
peer: PubSubPeer, | ||
topics: openArray[string], | ||
subscribe: bool) = | ||
## send subscriptions to remote peer | ||
p.send(peer, RPCMsg.withSubs(topics, subscribe)) | ||
asyncSpawn p.send(peer, RPCMsg.withSubs(topics, subscribe)) | ||
|
||
for topic in topics: | ||
if subscribe: | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
can't there be two inflight lazy broadcasts of the same message?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
That shouldn't happen, I may add an assert to be sure