Add support for multihop payments #132

Merged: 11 commits, merged Oct 20, 2022
Changes from 3 commits
go.mod: 2 changes (1 addition, 1 deletion)
@@ -5,7 +5,7 @@ go 1.19
require (
    github.com/miguelmota/go-ethereum-hdwallet v0.1.1
    github.com/multiformats/go-multiaddr v0.7.0
-   github.com/statechannels/go-nitro v0.0.0-20221006155253-f0e74994b9e4
+   github.com/statechannels/go-nitro v0.0.0-20221009024643-ab7b1a648d10
)

require (
go.sum: 4 changes (2 additions, 2 deletions)
@@ -820,8 +820,8 @@
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/statechannels/go-nitro v0.0.0-20221006155253-f0e74994b9e4 h1:3I63UGzArK77M6umURgToOzi/WkJ3UXfW6kOkEPUYfU=
-github.com/statechannels/go-nitro v0.0.0-20221006155253-f0e74994b9e4/go.mod h1:Dg68KKTZKjsWhSQbSa5HLJtzzgxtGqofj0JoqMmb8RU=
+github.com/statechannels/go-nitro v0.0.0-20221009024643-ab7b1a648d10 h1:xRQnotT6CXS7MQ8y1ZeQxs3gJ0JOqDcaAVOZwstL/CE=
+github.com/statechannels/go-nitro v0.0.0-20221009024643-ab7b1a648d10/go.mod h1:Dg68KKTZKjsWhSQbSa5HLJtzzgxtGqofj0JoqMmb8RU=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
main.go: 3 changes (2 additions, 1 deletion)
@@ -8,6 +8,7 @@ import (

func main() {
    run.InvokeMap(map[string]interface{}{
-       "virtual-payment": run.InitializedTestCaseFn(tests.CreateVirtualPaymentTest),
+       "virtual-payment":           run.InitializedTestCaseFn(tests.CreateVirtualPaymentTest),
+       "multi-hop-virtual-payment": run.InitializedTestCaseFn(tests.CreateMultiHopVirtualPaymentTest),
    })
}
manifest.toml: 17 changes (17 additions, 0 deletions)
@@ -37,3 +37,20 @@
numOfPayeePayers = {type = "int", default = 0, desc = "The number of instances that should play the role of the payeepayer"}
numOfPayees = {type = "int", default = 1, desc = "The number of instances that should play the role of the payee"}
numOfPayers = {type = "int", default = 1, desc = "The number of instances that should play the role of the payer"}
paymentTestDuration = {type = "int", default = 10, unit = "seconds"}

[[testcases]]
instances = {min = 2, max = 100, default = 5}
name = "multi-hop-virtual-payment"

[testcases.params]
concurrentPaymentJobs = {type = "int", desc = "The number of concurrent payment jobs a peer should attempt to maintain", default = 1}
isCI = {type = "bool", default = false, desc = "Whether this test is being run from CI"}
isNightly = {type = "bool", default = false, desc = "Whether this test is being run as part of the nightly test suite"}
networkJitter = {type = "int", unit = "milliseconds", default = 0}
networkLatency = {type = "int", unit = "milliseconds", default = 0}
numOfHubs = {type = "int", default = 3, desc = "The number of instances that should play the role of the hub"}
numOfIntermediaries = {type = "int", default = 2, desc = "The number of intermediaries (hops) to use in the virtual payment channel"}
numOfPayeePayers = {type = "int", default = 0, desc = "The number of instances that should play the role of the payeepayer"}
numOfPayees = {type = "int", default = 4, desc = "The number of instances that should play the role of the payee"}
numOfPayers = {type = "int", default = 1, desc = "The number of instances that should play the role of the payer"}
paymentTestDuration = {type = "int", default = 10, unit = "seconds"}
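
For orientation, a minimal sketch of how the new test case reads these parameters through the Testground runtime API (the helper name readMultiHopParams is ours, not part of the diff; see tests/multi-hop-virtual-payment.go below for the real usage):

// readMultiHopParams shows how the manifest parameters above surface in test code.
// runEnv is the *runtime.RunEnv passed to the test-case entry point.
func readMultiHopParams(runEnv *runtime.RunEnv) (numHops int, isNightly bool) {
    numHops = runEnv.IntParam("numOfIntermediaries") // hops per virtual payment channel
    isNightly = runEnv.BooleanParam("isNightly")     // gates nightly metric recording
    return numHops, isNightly
}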
peer/peer.go: 1 change (1 addition, 0 deletions)
@@ -26,6 +26,7 @@ const (
type PeerInfo struct {
    p2pms.PeerInfo
    Role Role
+   Seq  int64
}

// IsPayer returns true if the peer's role is a Payer or PayeePayer
tests/multi-hop-virtual-payment.go: 213 changes (213 additions, 0 deletions; new file)
@@ -0,0 +1,213 @@
package tests

import (
    "context"
    "fmt"
    "math/big"
    "math/rand"
    "os"
    "time"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/statechannels/go-nitro-testground/chain"
    c "github.com/statechannels/go-nitro-testground/config"
    "github.com/statechannels/go-nitro-testground/peer"
    "github.com/statechannels/go-nitro-testground/utils"
    "github.com/statechannels/go-nitro/channel/state/outcome"
    nitro "github.com/statechannels/go-nitro/client"
    "github.com/statechannels/go-nitro/client/engine"
    p2pms "github.com/statechannels/go-nitro/client/engine/messageservice/p2p-message-service"
    "github.com/statechannels/go-nitro/client/engine/store"
    "github.com/statechannels/go-nitro/protocols"
    "github.com/statechannels/go-nitro/types"
    "github.com/testground/sdk-go/run"
    "github.com/testground/sdk-go/runtime"
    "github.com/testground/sdk-go/sync"
)

func CreateMultiHopVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) error {
    // The default frequency of diagnostics is 10 seconds.
    // That's a bit too slow for most of our test runs.
    runEnv.D().SetFrequency(1 * time.Second)
    ctx := context.Background()

    client := init.SyncClient
    net := init.NetClient

    networkJitterMS, networkLatencyMS := runEnv.IntParam("networkJitter"), runEnv.IntParam("networkLatency")
    // instantiate a network client and wait for it to be ready.
    err := utils.ConfigureNetworkClient(ctx, net, client, runEnv, networkJitterMS, networkLatencyMS)
    if err != nil {
        panic(err)
    }

    seq := init.GlobalSeq
    ip := net.MustGetDataNetworkIP()

    runConfig, err := c.GetRunConfig(runEnv)
    if err != nil {
        panic(err)
    }

    role := peer.GetRole(seq, runConfig)
    // Generate a unique private key for this instance.
    privateKey, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }

    pk := crypto.FromECDSA(privateKey)
    address := crypto.PubkeyToAddress(privateKey.PublicKey)
    port := START_PORT + int(seq)
    ipAddress := ip.String()

    // Create the message service using the given key.
    ms := p2pms.NewMessageService(ipAddress, port, pk)
    client.MustSignalAndWait(ctx, "msStarted", runEnv.TestInstanceCount)

    mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role, Seq: seq}
    me := peer.MyInfo{PeerInfo: mePeerInfo, PrivateKey: *privateKey}

    runEnv.RecordMessage("I am address:%s role:%d seq:%d", me.Address, me.Role, me.Seq)

    utils.RecordRunInfo(me, runConfig, runEnv.D())

    // Broadcast our info and get peer info from all other instances.
    peers := utils.SharePeerInfo(me.PeerInfo, ctx, client, runEnv.TestInstanceCount)

    // Register our peers with the message service.
    ms.AddPeers(peer.GetMessageServicePeers(peers))
    client.MustSignalAndWait(ctx, "peersAdded", runEnv.TestInstanceCount)

    store := store.NewMemStore(crypto.FromECDSA(&me.PrivateKey))

    // We skip the 0x prefix by slicing from index 2.
    shortAddress := me.Address.String()[2:8]
    logPath := fmt.Sprintf("./outputs/nitro-client-%s-role-%d.log", shortAddress, me.Role)
    // The outputs folder will be copied when results are collected.
    logDestination, _ := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666)

    // All instances wait until the NitroAdjudicator has been deployed (the seq = 1 instance is responsible).
    cs := chain.NewChainService(ctx, seq, logDestination)
    contractSetup := sync.State("contractSetup")
    client.MustSignalEntry(ctx, contractSetup)
    client.MustBarrier(ctx, contractSetup, runEnv.TestInstanceCount)

    nClient := nitro.New(ms, cs, store, logDestination, &engine.PermissivePolicy{}, runEnv.D())

    cm := utils.NewCompletionMonitor(&nClient, runEnv.RecordMessage)
    defer cm.Close()

    // We wait until everyone has chosen an address.
    client.MustSignalAndWait(ctx, "client created", runEnv.TestInstanceCount)

    client.MustSignalAndWait(ctx, "message service connected", runEnv.TestInstanceCount)

    // Create ledger channels with all the hubs.
    ledgerIds := utils.CreateLedgerChannels(nClient, cm, utils.FINNEY_IN_WEI, me.PeerInfo, peers)
    if len(ledgerIds) > 0 {
        runEnv.RecordMessage("%s: Created Ledgers %s", me.Address, utils.AbbreviateSlice(ledgerIds))
    }

    client.MustSignalAndWait(ctx, sync.State("ledgerDone"), runEnv.TestInstanceCount)

    if me.IsPayer() {

        hubs := peer.FilterByRole(peers, peer.Hub)
        payees := peer.FilterByRole(peers, peer.Payee)
        payees = append(payees, peer.FilterByRole(peers, peer.PayerPayee)...)

        createVirtualPaymentsJob := func() {
            numHops := runEnv.IntParam("numOfIntermediaries")

            selectedHubs := utils.SelectRandomHubs(hubs, numHops)
            runEnv.RecordMessage("%s: Selected hubs %s", me.Address, utils.AbbreviateSlice(selectedHubs))
            randomPayee := utils.SelectRandom(payees)

            var channelId types.Destination
            runEnv.D().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Time(func() {

                outcome := outcome.Exit{outcome.SingleAssetExit{
                    Allocations: outcome.Allocations{
                        outcome.Allocation{
                            Destination: types.AddressToDestination(me.Address),
                            Amount:      big.NewInt(int64(10 * utils.GWEI_IN_WEI)),
                        },
                        outcome.Allocation{
                            Destination: types.AddressToDestination(randomPayee.Address),
                            Amount:      big.NewInt(0),
                        },
                    },
                }}

                r := nClient.CreateVirtualPaymentChannel(selectedHubs, randomPayee.Address, 0, outcome)

                channelId = r.ChannelId
                cm.WaitForObjectivesToComplete([]protocols.ObjectiveId{r.Id})

                runEnv.RecordMessage("Opened virtual channel %s with %s using hubs %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address), utils.AbbreviateSlice(selectedHubs))

                paymentAmount := big.NewInt(utils.KWEI_IN_WEI)
                nClient.Pay(r.ChannelId, paymentAmount)
                runEnv.RecordMessage("Sent payment of %d wei to %s using channel %s", paymentAmount.Int64(), utils.Abbreviate(randomPayee.Address), utils.Abbreviate(channelId))

                // TODO: Should we wait for receipt of this payment before stopping the time_to_first_payment timer?
            })

            // Perform between 1 and 4 additional payments.
            amountOfPayments := 1 + rand.Intn(4)
            for i := 0; i < amountOfPayments; i++ {
                // pay between 1 and 2 kwei
                paymentAmount := big.NewInt(utils.KWEI_IN_WEI + rand.Int63n(utils.KWEI_IN_WEI))
                nClient.Pay(channelId, paymentAmount)

                runEnv.RecordMessage("Sent payment of %d wei to %s using channel %s", paymentAmount.Int64(), utils.Abbreviate(randomPayee.Address), utils.Abbreviate(channelId))
            }

            // TODO: If we attempt to close a virtual channel too fast we can cause other clients to fail.
            // See https://github.com/statechannels/go-nitro/issues/744
            time.Sleep(250 * time.Millisecond)

            // TODO: get payment balance and output it to the log
            runEnv.RecordMessage("Closing %s with payment to %s", utils.Abbreviate(channelId), utils.Abbreviate(randomPayee.Address))
            closeId := nClient.CloseVirtualChannel(channelId)
            cm.WaitForObjectivesToComplete([]protocols.ObjectiveId{closeId})
        }

        // Run the job(s)
        utils.RunJobs(createVirtualPaymentsJob, runConfig.PaymentTestDuration, int64(runConfig.ConcurrentPaymentJobs))
    }
    client.MustSignalAndWait(ctx, "paymentsDone", runEnv.TestInstanceCount)

    if me.Role != peer.Hub {
        // TODO: Closing a ledger channel too soon after closing a virtual channel seems to fail.
        time.Sleep(250 * time.Millisecond)
        // Close all the ledger channels with the hub.
        oIds := []protocols.ObjectiveId{}
        for _, ledgerId := range ledgerIds {
            runEnv.RecordMessage("Closing ledger %s", utils.Abbreviate(ledgerId))
            oId := nClient.CloseLedgerChannel(ledgerId)
            oIds = append(oIds, oId)
        }
        cm.WaitForObjectivesToComplete(oIds)
        runEnv.RecordMessage("All ledger channels closed")
    }

    // Record the mean time to first payment to nightly/CI metrics if applicable.
    // This allows us to track performance over time.
    mean := runEnv.D().Timer(fmt.Sprintf("time_to_first_payment,me=%s", me.Address)).Mean()
    if runEnv.BooleanParam("isNightly") {
        runEnv.R().RecordPoint(fmt.Sprintf("nightly_mean_time_to_first_payment,me=%s", me.Address), float64(mean))
    }
    if runEnv.BooleanParam("isCI") {
        runEnv.R().RecordPoint(fmt.Sprintf("ci_mean_time_to_first_payment,me=%s", me.Address), float64(mean))
    }

    client.MustSignalAndWait(ctx, "done", runEnv.TestInstanceCount)

    return nil
}
tests/virtual-payment.go: 2 changes (1 addition, 1 deletion)
@@ -66,7 +66,7 @@ func CreateVirtualPaymentTest(runEnv *runtime.RunEnv, init *run.InitContext) error {
    ms := p2pms.NewMessageService(ipAddress, port, pk)
    client.MustSignalAndWait(ctx, "msStarted", runEnv.TestInstanceCount)

-   mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role}
+   mePeerInfo := peer.PeerInfo{PeerInfo: p2pms.PeerInfo{Address: address, IpAddress: ipAddress, Port: port, Id: ms.Id()}, Role: role, Seq: seq}
    me := peer.MyInfo{PeerInfo: mePeerInfo, PrivateKey: *privateKey}

    runEnv.RecordMessage("I am %+v", me)
utils/testing.go: 53 changes (50 additions, 3 deletions)
@@ -7,6 +7,7 @@ import (
"math/big"
"math/rand"
"runtime/debug"
"sort"

s "sync"
"sync/atomic"
@@ -95,6 +96,19 @@ func RunJobs(job func(), duration time.Duration, concurrencyTarget int64) {
    wg.Wait()
}

// AbbreviateSlice returns a string with abbreviated elements of the given slice.
func AbbreviateSlice[U ~[]T, T fmt.Stringer](col U) string {
    abbreviated := ""
    for i, s := range col {
        if i > 0 {
            abbreviated += ", "
        }
        abbreviated += s.String()[0:8]
    }

    return abbreviated
}

// Abbreviate shortens a string to 8 characters and adds an ellipsis.
func Abbreviate(s fmt.Stringer) string {
    return s.String()[0:8] + ".."
@@ -128,16 +142,50 @@ func SelectRandom[U ~[]T, T any](collection U) T {
    return collection[randomIndex]
}

// SelectRandomHubs selects numHubs hubs randomly from hubs.
func SelectRandomHubs(hubs []peer.PeerInfo, numHubs int) []types.Address {
    // Copy and shuffle the slice of hubs.
    shuffled := make([]peer.PeerInfo, len(hubs))
    copy(shuffled, hubs)
    rand.Shuffle(len(shuffled),
        func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })

    // Select the number of hubs we want.
    selected := make([]peer.PeerInfo, numHubs)
    for i := 0; i < numHubs; i++ {
        selected[i] = shuffled[i]
    }

    // TODO: Virtual defunding seems to fail if intermediaries are not in "order".

[Review comment, Contributor (Author)]: To co-ordinate who initiates the ledger channel between two hubs I've introduced a simple rule: the participant with the lowest sequence number of the two hubs is responsible for creating the ledger channel.

It seems that the intermediaries need to be passed in the correct order, based on the ledger initiator (I think?). Since we use the sequence number to determine who initiates ledger channels, we can just sort the intermediaries we pass in by their sequence number.

If I don't do this I get this error:

panic: 0xA73B1683e0FCD74CEFF77aA5A90cAE1cF52d1c0F, error in run loop: signed proposal is not addressed to a known ledger connection {Signature:{R:[92 99 4 159 107 148 113 183 88 34 206 249 199 58 188 66 200 68 183 34 223 59 233 114 212 234 215 169 34 23 226 41] S:[123 131 214 254 250 117 65 42 222 212 113 236 31 15 36 251 243 88 198 162 117 13 215 51 6 198 227 98 105 176 200 126] V:28} Proposal:{LedgerID:0x1db0ab987f97ca8d2d21845016ebe74996102fb884b78c77efb88739a0b069c0 ToAdd:{Guarantee:{amount:<nil> target:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] left:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] right:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]} LeftDeposit:<nil>} ToRemove:{Target:0x6d8536f70e580266089f52475b49a95d97bed033b94a516b02a16803242e24f2 LeftAmount:+9999995628}} TurnNum:4}

[Review reply, Contributor]: Let's get an issue open to track this.

    // The order seems to be determined by the initiator of the ledger channel.
    // Since we use the sequence number to determine the initiator, we can just sort on sequence number.
    sort.Slice(selected, func(i, j int) bool {
        return selected[i].Seq < selected[j].Seq
    })

    // Convert to addresses for the caller's convenience.
    selectedAddresses := make([]types.Address, numHubs)
    for i, hub := range selected {
        selectedAddresses[i] = hub.Address
    }
    return selectedAddresses
}
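
The coordination rule discussed in the review thread above can be stated as a one-line predicate. A minimal sketch (the helper name isLedgerInitiator is ours and not part of the diff; the real guard is inlined in CreateLedgerChannels below):

// isLedgerInitiator reports whether hub a is responsible for creating the
// ledger channel it shares with hub b: the lower sequence number initiates.
func isLedgerInitiator(a, b peer.PeerInfo) bool {
    return a.Seq < b.Seq
}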

// CreateLedgerChannels creates a directly funded ledger channel with each hub in hubs.
// The funding for each channel will be set to amount for both participants.
// This function blocks until all ledger channels have successfully been created.
func CreateLedgerChannels(client nitro.Client, cm *CompletionMonitor, amount uint, me peer.PeerInfo, peers []peer.PeerInfo) []types.Destination {
    ids := []protocols.ObjectiveId{}
    cIds := []types.Destination{}
-   for _, p := range peers {
-       if p.Role != peer.Hub {
+   hubs := peer.FilterByRole(peers, peer.Hub)
+   for _, p := range hubs {
+
+       // To co-ordinate creating ledger channels between hubs, a hub will only
+       // create a channel with another hub that has a greater sequence number.
+       if me.Role == peer.Hub && p.Seq <= me.Seq {
            continue
        }

        outcome := outcome.Exit{outcome.SingleAssetExit{
            Allocations: outcome.Allocations{
                outcome.Allocation{
@@ -150,7 +198,6 @@
                },
            },
        }}
-
        r := client.CreateLedgerChannel(p.Address, 0, outcome)
        cIds = append(cIds, r.ChannelId)
        ids = append(ids, r.Id)