diff --git a/benchmarks_test.go b/benchmarks_test.go deleted file mode 100644 index ef3582b3..00000000 --- a/benchmarks_test.go +++ /dev/null @@ -1,679 +0,0 @@ -package bitswap_test - -import ( - "context" - "encoding/json" - "fmt" - "math" - "math/rand" - "os" - "strconv" - "sync" - "testing" - "time" - - "github.com/ipfs/go-bitswap/internal/testutil" - blocks "github.com/ipfs/go-block-format" - protocol "github.com/libp2p/go-libp2p/core/protocol" - - "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" - cid "github.com/ipfs/go-cid" - delay "github.com/ipfs/go-ipfs-delay" - mockrouting "github.com/ipfs/go-ipfs-routing/mock" -) - -type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) - -type distFunc func(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) - -type runStats struct { - DupsRcvd uint64 - BlksRcvd uint64 - MsgSent uint64 - MsgRecd uint64 - Time time.Duration - Name string -} - -var benchmarkLog []runStats - -type bench struct { - name string - nodeCount int - blockCount int - distFn distFunc - fetchFn fetchFunc -} - -var benches = []bench{ - // Fetch from two seed nodes that both have all 100 blocks - // - request one at a time, in series - {"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime}, - // - request all 100 with a single GetBlocks() call - {"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll}, - - // Fetch from two seed nodes, one at a time, where: - // - node A has blocks 0 - 74 - // - node B has blocks 25 - 99 - {"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime}, - - // Fetch from two seed nodes, where: - // - node A has even blocks - // - node B has odd blocks - // - both nodes have every third block - - // - request one at a time, in series - {"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime}, - // - request 10 at a time, in series - {"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10}, - // - request all 100 in parallel as individual GetBlock() calls - {"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent}, - // - request all 100 with a single GetBlocks() call - {"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll}, - // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) - {"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, - - // Fetch from nine seed nodes, all nodes have all blocks - // - request one at a time, in series - {"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, - // - request 10 at a time, in series - {"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, - // - request all 100 with a single GetBlocks() call - {"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, - // - request all 100 in parallel as individual GetBlock() calls - {"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent}, - // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) - {"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, - // - follow a typical IPFS request pattern for 1000 blocks - {"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge}, - - // Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups) - // - request one at a time, in series - {"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, - // - request all 100 with a single 
GetBlocks() call - {"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, - // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) - {"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, - - // Fetch from 199 seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call - {"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, -} - -func BenchmarkFixedDelay(b *testing.B) { - benchmarkLog = nil - fixedDelay := delay.Fixed(10 * time.Millisecond) - bstoreLatency := time.Duration(0) - - for _, bch := range benches { - b.Run(bch.name, func(b *testing.B) { - subtestDistributeAndFetch(b, bch.nodeCount, bch.blockCount, fixedDelay, bstoreLatency, bch.distFn, bch.fetchFn) - }) - } - - out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/benchmark.json", out, 0666) - printResults(benchmarkLog) -} - -type mixedBench struct { - bench - fetcherCount int // number of nodes that fetch data - oldSeedCount int // number of seed nodes running old version of Bitswap -} - -var mixedBenches = []mixedBench{ - {bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, - {bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, - {bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, - // mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, -} - -func BenchmarkFetchFromOldBitswap(b *testing.B) { - benchmarkLog = nil - fixedDelay := delay.Fixed(10 * time.Millisecond) - bstoreLatency := time.Duration(0) - - for _, bch := range mixedBenches { - b.Run(bch.name, func(b *testing.B) { - fetcherCount := bch.fetcherCount - oldSeedCount := bch.oldSeedCount - newSeedCount := bch.nodeCount - (fetcherCount + oldSeedCount) - - net := tn.VirtualNetwork(mockrouting.NewServer(), fixedDelay) - - // Simulate an older Bitswap node (old protocol ID) that doesn't - // send DONT_HAVE responses - oldProtocol := []protocol.ID{bsnet.ProtocolBitswapOneOne} - oldNetOpts := []bsnet.NetOpt{bsnet.SupportedProtocols(oldProtocol)} - oldBsOpts := []bitswap.Option{bitswap.SetSendDontHaves(false)} - oldNodeGenerator := testinstance.NewTestInstanceGenerator(net, oldNetOpts, oldBsOpts) - - // Regular new Bitswap node - newNodeGenerator := testinstance.NewTestInstanceGenerator(net, nil, nil) - var instances []testinstance.Instance - - // Create new nodes (fetchers + seeds) - for i := 0; i < fetcherCount+newSeedCount; i++ { - inst := newNodeGenerator.Next() - instances = append(instances, inst) - } - // Create old nodes (just seeds) - for i := 0; i < oldSeedCount; i++ { - inst := oldNodeGenerator.Next() - instances = append(instances, inst) - } - // Connect all the nodes together - testinstance.ConnectInstances(instances) - - // Generate blocks, with a smaller root block - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(bch.blockCount, stdBlockSize) - blocks[0] = rootBlock[0] - - // Run the distribution - runDistributionMulti(b, instances[:fetcherCount], instances[fetcherCount:], blocks, bstoreLatency, bch.distFn, bch.fetchFn) - - newNodeGenerator.Close() - oldNodeGenerator.Close() - }) - } - - out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/benchmark.json", out, 0666) - printResults(benchmarkLog) -} - -const datacenterSpeed = 5 * time.Millisecond -const fastSpeed = 60 * time.Millisecond -const mediumSpeed = 200 * time.Millisecond 
-const slowSpeed = 800 * time.Millisecond -const superSlowSpeed = 4000 * time.Millisecond -const datacenterDistribution = 3 * time.Millisecond -const distribution = 20 * time.Millisecond -const datacenterBandwidth = 125000000.0 -const datacenterBandwidthDeviation = 3000000.0 -const fastBandwidth = 1250000.0 -const fastBandwidthDeviation = 300000.0 -const mediumBandwidth = 500000.0 -const mediumBandwidthDeviation = 80000.0 -const slowBandwidth = 100000.0 -const slowBandwidthDeviation = 16500.0 -const rootBlockSize = 800 -const stdBlockSize = 8000 -const largeBlockSize = int64(256 * 1024) - -func BenchmarkRealWorld(b *testing.B) { - benchmarkLog = nil - benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) - var randomGen *rand.Rand = nil - if err == nil { - randomGen = rand.New(rand.NewSource(benchmarkSeed)) - } - - fastNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( - mediumSpeed-fastSpeed, slowSpeed-fastSpeed, - 0.0, 0.0, distribution, randomGen) - fastNetworkDelay := delay.Delay(fastSpeed, fastNetworkDelayGenerator) - fastBandwidthGenerator := tn.VariableRateLimitGenerator(fastBandwidth, fastBandwidthDeviation, randomGen) - averageNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( - mediumSpeed-fastSpeed, slowSpeed-fastSpeed, - 0.3, 0.3, distribution, randomGen) - averageNetworkDelay := delay.Delay(fastSpeed, averageNetworkDelayGenerator) - averageBandwidthGenerator := tn.VariableRateLimitGenerator(mediumBandwidth, mediumBandwidthDeviation, randomGen) - slowNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( - mediumSpeed-fastSpeed, superSlowSpeed-fastSpeed, - 0.3, 0.3, distribution, randomGen) - slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) - slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, randomGen) - bstoreLatency := time.Duration(0) - - b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) - }) - b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) - }) - b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) - }) - out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/rw-benchmark.json", out, 0666) - printResults(benchmarkLog) -} - -func BenchmarkDatacenter(b *testing.B) { - benchmarkLog = nil - benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) - var randomGen *rand.Rand = nil - if err == nil { - randomGen = rand.New(rand.NewSource(benchmarkSeed)) - } - - datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( - fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, - 0.0, 0.0, datacenterDistribution, randomGen) - datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) - datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) - bstoreLatency := time.Millisecond * 25 - - b.Run("3Nodes-Overlap3-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 3, 100, 
datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) - }) - out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) - printResults(benchmarkLog) -} - -func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { - benchmarkLog = nil - benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) - var randomGen *rand.Rand = nil - if err == nil { - randomGen = rand.New(rand.NewSource(benchmarkSeed)) - } - - datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( - fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, - 0.0, 0.0, datacenterDistribution, randomGen) - datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) - datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) - bstoreLatency := time.Millisecond * 25 - - b.Run("3Leech3Seed-AllToAll-UnixfsFetch", func(b *testing.B) { - d := datacenterNetworkDelay - rateLimitGenerator := datacenterBandwidthGenerator - blockSize := largeBlockSize - df := allToAll - ff := unixfsFileFetchLarge - numnodes := 6 - numblks := 1000 - - for i := 0; i < b.N; i++ { - net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - - instances := ig.Instances(numnodes) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) - runDistributionMulti(b, instances[:3], instances[3:], blocks, bstoreLatency, df, ff) - } - }) - - out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) - printResults(benchmarkLog) -} - -func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { - for i := 0; i < b.N; i++ { - net := tn.VirtualNetwork(mockrouting.NewServer(), d) - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - - instances := ig.Instances(numnodes) - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) - blocks[0] = rootBlock[0] - runDistribution(b, instances, blocks, bstoreLatency, df, ff) - ig.Close() - } -} - -func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { - for i := 0; i < b.N; i++ { - net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - - instances := ig.Instances(numnodes) - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) - blocks[0] = rootBlock[0] - runDistribution(b, instances, blocks, bstoreLatency, df, ff) - } -} - -func runDistributionMulti(b *testing.B, fetchers []testinstance.Instance, seeds []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { - // Distribute blocks to seed nodes - df(b, seeds, blocks) - - // Set the blockstore latency on seed nodes - if bstoreLatency > 0 { - for _, i := range seeds { - i.SetBlockstoreLatency(bstoreLatency) - } - } - - // Fetch blocks (from seed nodes to leech nodes) - var ks []cid.Cid - for _, blk := range blocks { - ks = append(ks, blk.Cid()) - } - - start := 
time.Now() - var wg sync.WaitGroup - for _, fetcher := range fetchers { - wg.Add(1) - - go func(ftchr testinstance.Instance) { - defer wg.Done() - - ff(b, ftchr.Exchange, ks) - }(fetcher) - } - wg.Wait() - - // Collect statistics - fetcher := fetchers[0] - st, err := fetcher.Exchange.Stat() - if err != nil { - b.Fatal(err) - } - - for _, fetcher := range fetchers { - nst := fetcher.Adapter.Stats() - stats := runStats{ - Time: time.Since(start), - MsgRecd: nst.MessagesRecvd, - MsgSent: nst.MessagesSent, - DupsRcvd: st.DupBlksReceived, - BlksRcvd: st.BlocksReceived, - Name: b.Name(), - } - benchmarkLog = append(benchmarkLog, stats) - } - // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) -} - -func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { - numnodes := len(instances) - fetcher := instances[numnodes-1] - - // Distribute blocks to seed nodes - seeds := instances[:numnodes-1] - df(b, seeds, blocks) - - // Set the blockstore latency on seed nodes - if bstoreLatency > 0 { - for _, i := range seeds { - i.SetBlockstoreLatency(bstoreLatency) - } - } - - // Fetch blocks (from seed nodes to leech nodes) - var ks []cid.Cid - for _, blk := range blocks { - ks = append(ks, blk.Cid()) - } - - start := time.Now() - ff(b, fetcher.Exchange, ks) - - // Collect statistics - st, err := fetcher.Exchange.Stat() - if err != nil { - b.Fatal(err) - } - - nst := fetcher.Adapter.Stats() - stats := runStats{ - Time: time.Since(start), - MsgRecd: nst.MessagesRecvd, - MsgSent: nst.MessagesSent, - DupsRcvd: st.DupBlksReceived, - BlksRcvd: st.BlocksReceived, - Name: b.Name(), - } - benchmarkLog = append(benchmarkLog, stats) - // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) -} - -func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { - for _, p := range provs { - if err := p.Blockstore().PutMany(context.Background(), blocks); err != nil { - b.Fatal(err) - } - } -} - -// overlap1 gives the first 75 blocks to the first peer, and the last 75 blocks -// to the second peer. This means both peers have the middle 50 blocks -func overlap1(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { - if len(provs) != 2 { - b.Fatal("overlap1 only works with 2 provs") - } - bill := provs[0] - jeff := provs[1] - - if err := bill.Blockstore().PutMany(context.Background(), blks[:75]); err != nil { - b.Fatal(err) - } - if err := jeff.Blockstore().PutMany(context.Background(), blks[25:]); err != nil { - b.Fatal(err) - } -} - -// overlap2 gives every even numbered block to the first peer, odd numbered -// blocks to the second. 
it also gives every third block to both peers -func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { - if len(provs) != 2 { - b.Fatal("overlap2 only works with 2 provs") - } - bill := provs[0] - jeff := provs[1] - - for i, blk := range blks { - even := i%2 == 0 - third := i%3 == 0 - if third || even { - if err := bill.Blockstore().Put(context.Background(), blk); err != nil { - b.Fatal(err) - } - } - if third || !even { - if err := jeff.Blockstore().Put(context.Background(), blk); err != nil { - b.Fatal(err) - } - } - } -} - -// onePeerPerBlock picks a random peer to hold each block -// with this layout, we shouldnt actually ever see any duplicate blocks -// but we're mostly just testing performance of the sync algorithm -func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { - for _, blk := range blks { - err := provs[rand.Intn(len(provs))].Blockstore().Put(context.Background(), blk) - if err != nil { - b.Fatal(err) - } - } -} - -func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()) - for _, c := range ks { - _, err := ses.GetBlock(context.Background(), c) - if err != nil { - b.Fatal(err) - } - } - // b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) -} - -// fetch data in batches, 10 at a time -func batchFetchBy10(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()) - for i := 0; i < len(ks); i += 10 { - out, err := ses.GetBlocks(context.Background(), ks[i:i+10]) - if err != nil { - b.Fatal(err) - } - for range out { - } - } -} - -// fetch each block at the same time concurrently -func fetchAllConcurrent(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()) - - var wg sync.WaitGroup - for _, c := range ks { - wg.Add(1) - go func(c cid.Cid) { - defer wg.Done() - _, err := ses.GetBlock(context.Background(), c) - if err != nil { - b.Error(err) - } - }(c) - } - wg.Wait() -} - -func batchFetchAll(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()) - out, err := ses.GetBlocks(context.Background(), ks) - if err != nil { - b.Fatal(err) - } - for range out { - } -} - -// simulates the fetch pattern of trying to sync a unixfs file graph as fast as possible -func unixfsFileFetch(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()) - _, err := ses.GetBlock(context.Background(), ks[0]) - if err != nil { - b.Fatal(err) - } - - out, err := ses.GetBlocks(context.Background(), ks[1:11]) - if err != nil { - b.Fatal(err) - } - for range out { - } - - out, err = ses.GetBlocks(context.Background(), ks[11:]) - if err != nil { - b.Fatal(err) - } - for range out { - } -} - -func unixfsFileFetchLarge(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()) - _, err := ses.GetBlock(context.Background(), ks[0]) - if err != nil { - b.Fatal(err) - } - - out, err := ses.GetBlocks(context.Background(), ks[1:11]) - if err != nil { - b.Fatal(err) - } - for range out { - } - - out, err = ses.GetBlocks(context.Background(), ks[11:100]) - if err != nil { - b.Fatal(err) - } - for range out { - } - - rest := ks[100:] - for len(rest) > 0 { - var batch [][]cid.Cid - for i := 0; i < 5 && len(rest) > 0; i++ { - cnt := 10 - if len(rest) < 10 { - cnt = len(rest) - } - group := rest[:cnt] - rest = rest[cnt:] - batch = append(batch, group) - } - - var anyErr error - var wg sync.WaitGroup - for _, 
group := range batch { - wg.Add(1) - go func(grp []cid.Cid) { - defer wg.Done() - - out, err = ses.GetBlocks(context.Background(), grp) - if err != nil { - anyErr = err - } - for range out { - } - }(group) - } - wg.Wait() - - // Note: b.Fatal() cannot be called from within a go-routine - if anyErr != nil { - b.Fatal(anyErr) - } - } -} - -func printResults(rs []runStats) { - nameOrder := make([]string, 0) - names := make(map[string]struct{}) - for i := 0; i < len(rs); i++ { - if _, ok := names[rs[i].Name]; !ok { - nameOrder = append(nameOrder, rs[i].Name) - names[rs[i].Name] = struct{}{} - } - } - - for i := 0; i < len(names); i++ { - name := nameOrder[i] - count := 0 - sent := 0.0 - rcvd := 0.0 - dups := 0.0 - blks := 0.0 - elpd := 0.0 - for i := 0; i < len(rs); i++ { - if rs[i].Name == name { - count++ - sent += float64(rs[i].MsgSent) - rcvd += float64(rs[i].MsgRecd) - dups += float64(rs[i].DupsRcvd) - blks += float64(rs[i].BlksRcvd) - elpd += float64(rs[i].Time) - } - } - sent /= float64(count) - rcvd /= float64(count) - dups /= float64(count) - blks /= float64(count) - - label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd/1000000000.0) - fmt.Printf("%-75s %s: sent %d, recv %d, dups %d / %d\n", - label, - fmtDuration(time.Duration(int64(math.Round(elpd/float64(count))))), - int64(math.Round(sent)), int64(math.Round(rcvd)), - int64(math.Round(dups)), int64(math.Round(blks))) - } -} - -func fmtDuration(d time.Duration) string { - d = d.Round(time.Millisecond) - s := d / time.Second - d -= s * time.Second - ms := d / time.Millisecond - return fmt.Sprintf("%d.%03ds", s, ms) -} diff --git a/bitswap.go b/bitswap.go index ea776c36..49e3a10d 100644 --- a/bitswap.go +++ b/bitswap.go @@ -2,180 +2,23 @@ package bitswap import ( "context" - "fmt" - "github.com/ipfs/go-bitswap/client" - "github.com/ipfs/go-bitswap/internal/defaults" - "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/server" - "github.com/ipfs/go-bitswap/tracer" - "github.com/ipfs/go-metrics-interface" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - logging "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p/core/peer" - - "go.uber.org/multierr" + libipfs "github.com/ipfs/go-libipfs/bitswap" ) -var log = logging.Logger("bitswap") - -// old interface we are targeting -type bitswap interface { - Close() error - GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) - GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) - GetWantBlocks() []cid.Cid - GetWantHaves() []cid.Cid - GetWantlist() []cid.Cid - IsOnline() bool - LedgerForPeer(p peer.ID) *server.Receipt - NewSession(ctx context.Context) exchange.Fetcher - NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error - PeerConnected(p peer.ID) - PeerDisconnected(p peer.ID) - ReceiveError(err error) - ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) - Stat() (*Stat, error) - WantlistForPeer(p peer.ID) []cid.Cid -} - -var _ exchange.SessionExchange = (*Bitswap)(nil) -var _ bitswap = (*Bitswap)(nil) -var HasBlockBufferSize = defaults.HasBlockBufferSize +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.HasBlockBufferSize instead +var HasBlockBufferSize = libipfs.HasBlockBufferSize -type Bitswap struct { - *client.Client - *server.Server - - tracer tracer.Tracer - net network.BitSwapNetwork -} +// Deprecated: use 
github.com/ipfs/go-libipfs/bitswap.Bitswap instead +type Bitswap = libipfs.Bitswap +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.New instead func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Bitswap { - bs := &Bitswap{ - net: net, - } - - var serverOptions []server.Option - var clientOptions []client.Option - - for _, o := range options { - switch typedOption := o.v.(type) { - case server.Option: - serverOptions = append(serverOptions, typedOption) - case client.Option: - clientOptions = append(clientOptions, typedOption) - case option: - typedOption(bs) - default: - panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), option(nil))) - } - } - - if bs.tracer != nil { - var tracer tracer.Tracer = nopReceiveTracer{bs.tracer} - clientOptions = append(clientOptions, client.WithTracer(tracer)) - serverOptions = append(serverOptions, server.WithTracer(tracer)) - } - - if HasBlockBufferSize != defaults.HasBlockBufferSize { - serverOptions = append(serverOptions, server.HasBlockBufferSize(HasBlockBufferSize)) - } - - ctx = metrics.CtxSubScope(ctx, "bitswap") - - bs.Server = server.New(ctx, net, bstore, serverOptions...) - bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) - net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once - - return bs -} - -func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { - return multierr.Combine( - bs.Client.NotifyNewBlocks(ctx, blks...), - bs.Server.NotifyNewBlocks(ctx, blks...), - ) -} - -type Stat struct { - Wantlist []cid.Cid - Peers []string - BlocksReceived uint64 - DataReceived uint64 - DupBlksReceived uint64 - DupDataReceived uint64 - MessagesReceived uint64 - BlocksSent uint64 - DataSent uint64 - ProvideBufLen int -} - -func (bs *Bitswap) Stat() (*Stat, error) { - cs, err := bs.Client.Stat() - if err != nil { - return nil, err - } - ss, err := bs.Server.Stat() - if err != nil { - return nil, err - } - - return &Stat{ - Wantlist: cs.Wantlist, - BlocksReceived: cs.BlocksReceived, - DataReceived: cs.DataReceived, - DupBlksReceived: cs.DupBlksReceived, - DupDataReceived: cs.DupDataReceived, - MessagesReceived: cs.MessagesReceived, - Peers: ss.Peers, - BlocksSent: ss.BlocksSent, - DataSent: ss.DataSent, - ProvideBufLen: ss.ProvideBufLen, - }, nil -} - -func (bs *Bitswap) Close() error { - bs.net.Stop() - return multierr.Combine( - bs.Client.Close(), - bs.Server.Close(), - ) -} - -func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { - if p == bs.net.Self() { - return bs.Client.GetWantlist() - } - return bs.Server.WantlistForPeer(p) + return libipfs.New(ctx, net, bstore, options...) 
} -func (bs *Bitswap) PeerConnected(p peer.ID) { - bs.Client.PeerConnected(p) - bs.Server.PeerConnected(p) -} - -func (bs *Bitswap) PeerDisconnected(p peer.ID) { - bs.Client.PeerDisconnected(p) - bs.Server.PeerDisconnected(p) -} - -func (bs *Bitswap) ReceiveError(err error) { - log.Infof("Bitswap Client ReceiveError: %s", err) - // TODO log the network error - // TODO bubble the network error up to the parent context/error logger -} - -func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { - if bs.tracer != nil { - bs.tracer.MessageReceived(p, incoming) - } - - bs.Client.ReceiveMessage(ctx, p, incoming) - bs.Server.ReceiveMessage(ctx, p, incoming) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.Stat instead +type Stat = libipfs.Stat diff --git a/bitswap_test.go b/bitswap_test.go deleted file mode 100644 index 2ab4547e..00000000 --- a/bitswap_test.go +++ /dev/null @@ -1,830 +0,0 @@ -package bitswap_test - -import ( - "bytes" - "context" - "fmt" - "os" - "sync" - "testing" - "time" - - "github.com/ipfs/go-bitswap" - bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/server" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - detectrace "github.com/ipfs/go-detect-race" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - delay "github.com/ipfs/go-ipfs-delay" - mockrouting "github.com/ipfs/go-ipfs-routing/mock" - ipld "github.com/ipfs/go-ipld-format" - tu "github.com/libp2p/go-libp2p-testing/etc" - p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func isCI() bool { - // https://github.blog/changelog/2020-04-15-github-actions-sets-the-ci-environment-variable-to-true/ - return os.Getenv("CI") != "" -} - -func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { - t.Helper() - err := inst.Blockstore().Put(ctx, blk) - if err != nil { - t.Fatal(err) - } - err = inst.Exchange.NotifyNewBlocks(ctx, blk) - if err != nil { - t.Fatal(err) - } -} - -// FIXME the tests are really sensitive to the network delay. 
fix them to work -// well under varying conditions -const kNetworkDelay = 0 * time.Millisecond - -func TestClose(t *testing.T) { - vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - block := bgen.Next() - bitswap := ig.Next() - - bitswap.Exchange.Close() - _, err := bitswap.Exchange.GetBlock(context.Background(), block.Cid()) - if err == nil { - t.Fatal("expected GetBlock to fail") - } -} - -func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this - - rs := mockrouting.NewServer() - net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - - block := blocks.NewBlock([]byte("block")) - pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) - err := rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network - if err != nil { - t.Fatal(err) - } - - solo := ig.Next() - defer solo.Exchange.Close() - - ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) - defer cancel() - _, err = solo.Exchange.GetBlock(ctx, block.Cid()) - - if err != context.DeadlineExceeded { - t.Fatal("Expected DeadlineExceeded error") - } -} - -func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - block := blocks.NewBlock([]byte("block")) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - - peers := ig.Instances(2) - hasBlock := peers[0] - defer hasBlock.Exchange.Close() - - addBlock(t, context.Background(), hasBlock, block) - - wantsBlock := peers[1] - defer wantsBlock.Exchange.Close() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - received, err := wantsBlock.Exchange.GetBlock(ctx, block.Cid()) - if err != nil { - t.Log(err) - t.Fatal("Expected to succeed") - } - - if !bytes.Equal(block.RawData(), received.RawData()) { - t.Fatal("Data doesn't match") - } -} - -func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - block := blocks.NewBlock([]byte("block")) - bsOpts := []bitswap.Option{bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50 * time.Millisecond)} - ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) - defer ig.Close() - - hasBlock := ig.Next() - defer hasBlock.Exchange.Close() - - wantsBlock := ig.Next() - defer wantsBlock.Exchange.Close() - - addBlock(t, context.Background(), hasBlock, block) - - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond) - defer cancel() - - ns := wantsBlock.Exchange.NewSession(ctx) - - received, err := ns.GetBlock(ctx, block.Cid()) - if received != nil { - t.Fatalf("Expected to find nothing, found %s", received) - } - - if err != context.DeadlineExceeded { - t.Fatal("Expected deadline exceeded") - } -} - -// Tests that a received block is not stored in the blockstore if the block was -// not requested by the client -func TestUnwantedBlockNotAdded(t *testing.T) { - - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - block := blocks.NewBlock([]byte("block")) - bsMessage := bsmsg.New(true) - bsMessage.AddBlock(block) - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - - peers := ig.Instances(2) - hasBlock := peers[0] - defer 
hasBlock.Exchange.Close() - - addBlock(t, context.Background(), hasBlock, block) - - doesNotWantBlock := peers[1] - defer doesNotWantBlock.Exchange.Close() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) - - blockInStore, err := doesNotWantBlock.Blockstore().Has(ctx, block.Cid()) - if err != nil || blockInStore { - t.Fatal("Unwanted block added to block store") - } -} - -// Tests that a received block is returned to the client and stored in the -// blockstore in the following scenario: -// - the want for the block has been requested by the client -// - the want for the block has not yet been sent out to a peer -// -// (because the live request queue is full) -func TestPendingBlockAdded(t *testing.T) { - ctx := context.Background() - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - bg := blocksutil.NewBlockGenerator() - sessionBroadcastWantCapacity := 4 - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - - instance := ig.Instances(1)[0] - defer instance.Exchange.Close() - - oneSecCtx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - // Request enough blocks to exceed the session's broadcast want list - // capacity (by one block). The session will put the remaining block - // into the "tofetch" queue - blks := bg.Blocks(sessionBroadcastWantCapacity + 1) - ks := make([]cid.Cid, 0, len(blks)) - for _, b := range blks { - ks = append(ks, b.Cid()) - } - outch, err := instance.Exchange.GetBlocks(ctx, ks) - if err != nil { - t.Fatal(err) - } - - // Wait a little while to make sure the session has time to process the wants - time.Sleep(time.Millisecond * 20) - - // Simulate receiving a message which contains the block in the "tofetch" queue - lastBlock := blks[len(blks)-1] - bsMessage := bsmsg.New(true) - bsMessage.AddBlock(lastBlock) - unknownPeer := peer.ID("QmUHfvCQrzyR6vFXmeyCptfCWedfcmfa12V6UuziDtrw23") - instance.Exchange.ReceiveMessage(oneSecCtx, unknownPeer, bsMessage) - - // Make sure Bitswap adds the block to the output channel - blkrecvd, ok := <-outch - if !ok { - t.Fatal("timed out waiting for block") - } - if !blkrecvd.Cid().Equals(lastBlock.Cid()) { - t.Fatal("received wrong block") - } -} - -func TestLargeSwarm(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - numInstances := 100 - numBlocks := 2 - if detectrace.WithRace() { - // when running with the race detector, 500 instances launches - // well over 8k goroutines. This hits a race detector limit. 
- numInstances = 20 - } else if isCI() { - numInstances = 200 - } else { - t.Parallel() - } - PerformDistributionTest(t, numInstances, numBlocks) -} - -func TestLargeFile(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - if !isCI() { - t.Parallel() - } - - numInstances := 10 - numBlocks := 100 - PerformDistributionTest(t, numInstances, numBlocks) -} - -func TestLargeFileTwoPeers(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - numInstances := 2 - numBlocks := 100 - PerformDistributionTest(t, numInstances, numBlocks) -} - -func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { - ctx := context.Background() - if testing.Short() { - t.SkipNow() - } - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, []bitswap.Option{ - bitswap.TaskWorkerCount(5), - bitswap.EngineTaskWorkerCount(5), - bitswap.MaxOutstandingBytesPerPeer(1 << 20), - }) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - instances := ig.Instances(numInstances) - blocks := bg.Blocks(numBlocks) - - t.Log("Give the blocks to the first instance") - - var blkeys []cid.Cid - first := instances[0] - for _, b := range blocks { - blkeys = append(blkeys, b.Cid()) - addBlock(t, ctx, first, b) - } - - t.Log("Distribute!") - - wg := sync.WaitGroup{} - errs := make(chan error) - - for _, inst := range instances[1:] { - wg.Add(1) - go func(inst testinstance.Instance) { - defer wg.Done() - outch, err := inst.Exchange.GetBlocks(ctx, blkeys) - if err != nil { - errs <- err - } - for range outch { - } - }(inst) - } - - go func() { - wg.Wait() - close(errs) - }() - - for err := range errs { - if err != nil { - t.Fatal(err) - } - } -} - -// TODO simplify this test. get to the _essence_! 
-func TestSendToWantingPeer(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - peers := ig.Instances(2) - peerA := peers[0] - peerB := peers[1] - - t.Logf("Session %v\n", peerA.Peer) - t.Logf("Session %v\n", peerB.Peer) - - waitTime := time.Second * 5 - - alpha := bg.Next() - // peerA requests and waits for block alpha - ctx, cancel := context.WithTimeout(context.Background(), waitTime) - defer cancel() - alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []cid.Cid{alpha.Cid()}) - if err != nil { - t.Fatal(err) - } - - // peerB announces to the network that he has block alpha - addBlock(t, ctx, peerB, alpha) - - // At some point, peerA should get alpha (or timeout) - blkrecvd, ok := <-alphaPromise - if !ok { - t.Fatal("context timed out and broke promise channel!") - } - - if !blkrecvd.Cid().Equals(alpha.Cid()) { - t.Fatal("Wrong block!") - } - -} - -func TestEmptyKey(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bs := ig.Instances(1)[0].Exchange - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - _, err := bs.GetBlock(ctx, cid.Cid{}) - if !ipld.IsNotFound(err) { - t.Error("empty str key should return ErrNotFound") - } -} - -func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint64) { - if sblks != st.BlocksSent { - t.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) - } - - if rblks != st.BlocksReceived { - t.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived) - } - - if sdata != st.DataSent { - t.Errorf("mismatch in data sent: %d vs %d", sdata, st.DataSent) - } - - if rdata != st.DataReceived { - t.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived) - } -} - -func TestBasicBitswap(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - t.Log("Test a one node trying to get one block from another") - - instances := ig.Instances(3) - blocks := bg.Blocks(1) - - // First peer has block - addBlock(t, context.Background(), instances[0], blocks[0]) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - // Second peer broadcasts want for block CID - // (Received by first and third peers) - blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) - if err != nil { - t.Fatal(err) - } - - // When second peer receives block, it should send out a cancel, so third - // peer should no longer keep second peer's want - if err = tu.WaitFor(ctx, func() error { - if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { - return fmt.Errorf("should have no items in other peers wantlist") - } - if len(instances[1].Exchange.GetWantlist()) != 0 { - return fmt.Errorf("shouldnt have anything in wantlist") - } - return nil - }); err != nil { - t.Fatal(err) - } - - st0, err := instances[0].Exchange.Stat() - if err != nil { - t.Fatal(err) - } - st1, err := instances[1].Exchange.Stat() - if err != nil { - t.Fatal(err) - } - - st2, err := instances[2].Exchange.Stat() - if err != nil { - t.Fatal(err) - } - - t.Log("stat node 0") - assertStat(t, 
st0, 1, 0, uint64(len(blk.RawData())), 0) - t.Log("stat node 1") - assertStat(t, st1, 0, 1, 0, uint64(len(blk.RawData()))) - t.Log("stat node 2") - assertStat(t, st2, 0, 0, 0, 0) - - if !bytes.Equal(blk.RawData(), blocks[0].RawData()) { - t.Errorf("blocks aren't equal: expected %v, actual %v", blocks[0].RawData(), blk.RawData()) - } - - t.Log(blk) - for _, inst := range instances { - err := inst.Exchange.Close() - if err != nil { - t.Fatal(err) - } - } -} - -func TestDoubleGet(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - t.Log("Test a one node trying to get one block from another") - - instances := ig.Instances(2) - blocks := bg.Blocks(1) - - // NOTE: A race condition can happen here where these GetBlocks requests go - // through before the peers even get connected. This is okay, bitswap - // *should* be able to handle this. - ctx1, cancel1 := context.WithCancel(context.Background()) - blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []cid.Cid{blocks[0].Cid()}) - if err != nil { - t.Fatal(err) - } - - ctx2, cancel2 := context.WithCancel(context.Background()) - defer cancel2() - - blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []cid.Cid{blocks[0].Cid()}) - if err != nil { - t.Fatal(err) - } - - // ensure both requests make it into the wantlist at the same time - time.Sleep(time.Millisecond * 20) - cancel1() - - _, ok := <-blkch1 - if ok { - t.Fatal("expected channel to be closed") - } - - addBlock(t, context.Background(), instances[0], blocks[0]) - - select { - case blk, ok := <-blkch2: - if !ok { - t.Fatal("expected to get the block here") - } - t.Log(blk) - case <-time.After(time.Second * 5): - p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Peer) - if len(p1wl) != 1 { - t.Logf("wantlist view didnt have 1 item (had %d)", len(p1wl)) - } else if !p1wl[0].Equals(blocks[0].Cid()) { - t.Logf("had 1 item, it was wrong: %s %s", blocks[0].Cid(), p1wl[0]) - } else { - t.Log("had correct wantlist, somehow") - } - t.Fatal("timed out waiting on block") - } - - for _, inst := range instances { - err := inst.Exchange.Close() - if err != nil { - t.Fatal(err) - } - } -} - -func TestWantlistCleanup(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - instances := ig.Instances(2) - instance := instances[0] - bswap := instance.Exchange - blocks := bg.Blocks(20) - - var keys []cid.Cid - for _, b := range blocks { - keys = append(keys, b.Cid()) - } - - // Once context times out, key should be removed from wantlist - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - _, err := bswap.GetBlock(ctx, keys[0]) - if err != context.DeadlineExceeded { - t.Fatal("shouldnt have fetched any blocks") - } - - time.Sleep(time.Millisecond * 50) - - if len(bswap.GetWantHaves()) > 0 { - t.Fatal("should not have anyting in wantlist") - } - - // Once context times out, keys should be removed from wantlist - ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - _, err = bswap.GetBlocks(ctx, keys[:10]) - if err != nil { - t.Fatal(err) - } - - <-ctx.Done() - time.Sleep(time.Millisecond * 50) - - if len(bswap.GetWantHaves()) > 0 { - t.Fatal("should not have anyting in wantlist") - } - - // 
Send want for single block, with no timeout - _, err = bswap.GetBlocks(context.Background(), keys[:1]) - if err != nil { - t.Fatal(err) - } - - // Send want for 10 blocks - ctx, cancel = context.WithCancel(context.Background()) - _, err = bswap.GetBlocks(ctx, keys[10:]) - if err != nil { - t.Fatal(err) - } - - // Even after 50 milli-seconds we haven't explicitly cancelled anything - // and no timeouts have expired, so we should have 11 want-haves - time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantHaves()) != 11 { - t.Fatal("should have 11 keys in wantlist") - } - - // Cancel the timeout for the request for 10 blocks. This should remove - // the want-haves - cancel() - - // Once the cancel is processed, we are left with the request for 1 block - time.Sleep(time.Millisecond * 50) - if !(len(bswap.GetWantHaves()) == 1 && bswap.GetWantHaves()[0] == keys[0]) { - t.Fatal("should only have keys[0] in wantlist") - } -} - -func assertLedgerMatch(ra, rb *server.Receipt) error { - if ra.Sent != rb.Recv { - return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d sent vs %d recvd", ra.Sent, rb.Recv) - } - - if ra.Recv != rb.Sent { - return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d recvd vs %d sent", ra.Recv, rb.Sent) - } - - if ra.Exchanged != rb.Exchanged { - return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged) - } - - return nil -} - -func assertLedgerEqual(ra, rb *server.Receipt) error { - if ra.Value != rb.Value { - return fmt.Errorf("mismatch in ledgers (value/debt ratio): %f vs %f ", ra.Value, rb.Value) - } - - if ra.Sent != rb.Sent { - return fmt.Errorf("mismatch in ledgers (sent bytes): %d vs %d", ra.Sent, rb.Sent) - } - - if ra.Recv != rb.Recv { - return fmt.Errorf("mismatch in ledgers (recvd bytes): %d vs %d", ra.Recv, rb.Recv) - } - - if ra.Exchanged != rb.Exchanged { - return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged) - } - - return nil -} - -func newReceipt(sent, recv, exchanged uint64) *server.Receipt { - return &server.Receipt{ - Peer: "test", - Value: float64(sent) / (1 + float64(recv)), - Sent: sent, - Recv: recv, - Exchanged: exchanged, - } -} - -func TestBitswapLedgerOneWay(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - t.Log("Test ledgers match when one peer sends block to another") - - instances := ig.Instances(2) - blocks := bg.Blocks(1) - addBlock(t, context.Background(), instances[0], blocks[0]) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) - if err != nil { - t.Fatal(err) - } - - ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) - rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) - - // compare peer ledger receipts - err = assertLedgerMatch(ra, rb) - if err != nil { - t.Fatal(err) - } - - // check that receipts have intended values - ratest := newReceipt(1, 0, 1) - err = assertLedgerEqual(ratest, ra) - if err != nil { - t.Fatal(err) - } - rbtest := newReceipt(0, 1, 1) - err = assertLedgerEqual(rbtest, rb) - if err != nil { - t.Fatal(err) - } - - t.Log(blk) - for _, inst := range instances { - err := inst.Exchange.Close() - if err != nil { - t.Fatal(err) - } - } -} - -func TestBitswapLedgerTwoWay(t *testing.T) { - net := 
tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - t.Log("Test ledgers match when two peers send one block to each other") - - instances := ig.Instances(2) - blocks := bg.Blocks(2) - addBlock(t, context.Background(), instances[0], blocks[0]) - addBlock(t, context.Background(), instances[1], blocks[1]) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - _, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) - if err != nil { - t.Fatal(err) - } - - ctx, cancel = context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) - if err != nil { - t.Fatal(err) - } - - ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) - rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) - - // compare peer ledger receipts - err = assertLedgerMatch(ra, rb) - if err != nil { - t.Fatal(err) - } - - // check that receipts have intended values - rtest := newReceipt(1, 1, 2) - err = assertLedgerEqual(rtest, ra) - if err != nil { - t.Fatal(err) - } - - err = assertLedgerEqual(rtest, rb) - if err != nil { - t.Fatal(err) - } - - t.Log(blk) - for _, inst := range instances { - err := inst.Exchange.Close() - if err != nil { - t.Fatal(err) - } - } -} - -type testingScoreLedger struct { - scorePeer server.ScorePeerFunc - started chan struct{} - closed chan struct{} -} - -func newTestingScoreLedger() *testingScoreLedger { - return &testingScoreLedger{ - nil, - make(chan struct{}), - make(chan struct{}), - } -} - -func (tsl *testingScoreLedger) GetReceipt(p peer.ID) *server.Receipt { - return nil -} -func (tsl *testingScoreLedger) AddToSentBytes(p peer.ID, n int) {} -func (tsl *testingScoreLedger) AddToReceivedBytes(p peer.ID, n int) {} -func (tsl *testingScoreLedger) PeerConnected(p peer.ID) {} -func (tsl *testingScoreLedger) PeerDisconnected(p peer.ID) {} -func (tsl *testingScoreLedger) Start(scorePeer server.ScorePeerFunc) { - tsl.scorePeer = scorePeer - close(tsl.started) -} -func (tsl *testingScoreLedger) Stop() { - close(tsl.closed) -} - -// Tests start and stop of a custom decision logic -func TestWithScoreLedger(t *testing.T) { - tsl := newTestingScoreLedger() - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - bsOpts := []bitswap.Option{bitswap.WithScoreLedger(tsl)} - ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) - defer ig.Close() - i := ig.Next() - defer i.Exchange.Close() - - select { - case <-tsl.started: - if tsl.scorePeer == nil { - t.Fatal("Expected the score function to be initialized") - } - case <-time.After(time.Second * 5): - t.Fatal("Expected the score ledger to be started within 5s") - } - - i.Exchange.Close() - select { - case <-tsl.closed: - case <-time.After(time.Second * 5): - t.Fatal("Expected the score ledger to be closed within 5s") - } -} diff --git a/client/bitswap_with_sessions_test.go b/client/bitswap_with_sessions_test.go deleted file mode 100644 index 5e4d2454..00000000 --- a/client/bitswap_with_sessions_test.go +++ /dev/null @@ -1,480 +0,0 @@ -package client_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/ipfs/go-bitswap" - "github.com/ipfs/go-bitswap/client/internal/session" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" - blocks "github.com/ipfs/go-block-format" - cid 
"github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - delay "github.com/ipfs/go-ipfs-delay" - mockrouting "github.com/ipfs/go-ipfs-routing/mock" - tu "github.com/libp2p/go-libp2p-testing/etc" -) - -func getVirtualNetwork() tn.Network { - // FIXME: the tests are really sensitive to the network delay. fix them to work - // well under varying conditions - return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) -} - -func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { - t.Helper() - err := inst.Blockstore().Put(ctx, blk) - if err != nil { - t.Fatal(err) - } - err = inst.Exchange.NotifyNewBlocks(ctx, blk) - if err != nil { - t.Fatal(err) - } -} - -func TestBasicSessions(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - block := bgen.Next() - inst := ig.Instances(2) - - a := inst[0] - b := inst[1] - - // Add a block to Peer B - if err := b.Blockstore().Put(ctx, block); err != nil { - t.Fatal(err) - } - - // Create a session on Peer A - sesa := a.Exchange.NewSession(ctx) - - // Get the block - blkout, err := sesa.GetBlock(ctx, block.Cid()) - if err != nil { - t.Fatal(err) - } - - if !blkout.Cid().Equals(block.Cid()) { - t.Fatal("got wrong block") - } -} - -func assertBlockLists(got, exp []blocks.Block) error { - if len(got) != len(exp) { - return fmt.Errorf("got wrong number of blocks, %d != %d", len(got), len(exp)) - } - - h := cid.NewSet() - for _, b := range got { - h.Add(b.Cid()) - } - for _, b := range exp { - if !h.Has(b.Cid()) { - return fmt.Errorf("didnt have: %s", b.Cid()) - } - } - return nil -} - -func TestSessionBetweenPeers(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.SetSimulateDontHavesOnTimeout(false)}) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - inst := ig.Instances(10) - - // Add 101 blocks to Peer A - blks := bgen.Blocks(101) - if err := inst[0].Blockstore().PutMany(ctx, blks); err != nil { - t.Fatal(err) - } - - var cids []cid.Cid - for _, blk := range blks { - cids = append(cids, blk.Cid()) - } - - // Create a session on Peer B - ses := inst[1].Exchange.NewSession(ctx) - if _, err := ses.GetBlock(ctx, cids[0]); err != nil { - t.Fatal(err) - } - blks = blks[1:] - cids = cids[1:] - - // Fetch blocks with the session, 10 at a time - for i := 0; i < 10; i++ { - ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) - if err != nil { - t.Fatal(err) - } - - var got []blocks.Block - for b := range ch { - got = append(got, b) - } - if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { - t.Fatal(err) - } - } - - // Uninvolved nodes should receive - // - initial broadcast want-have of root block - // - CANCEL (when Peer A receives the root block from Peer B) - for _, is := range inst[2:] { - stat, err := is.Exchange.Stat() - if err != nil { - t.Fatal(err) - } - if stat.MessagesReceived > 2 { - t.Fatal("uninvolved nodes should only receive two messages", stat.MessagesReceived) - } - } -} - -func TestSessionSplitFetch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vnet := getVirtualNetwork() - ig := 
testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - inst := ig.Instances(11) - - // Add 10 distinct blocks to each of 10 peers - blks := bgen.Blocks(100) - for i := 0; i < 10; i++ { - if err := inst[i].Blockstore().PutMany(ctx, blks[i*10:(i+1)*10]); err != nil { - t.Fatal(err) - } - } - - var cids []cid.Cid - for _, blk := range blks { - cids = append(cids, blk.Cid()) - } - - // Create a session on the remaining peer and fetch all the blocks 10 at a time - ses := inst[10].Exchange.NewSession(ctx).(*session.Session) - ses.SetBaseTickDelay(time.Millisecond * 10) - - for i := 0; i < 10; i++ { - ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) - if err != nil { - t.Fatal(err) - } - - var got []blocks.Block - for b := range ch { - got = append(got, b) - } - if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { - t.Fatal(err) - } - } -} - -func TestFetchNotConnected(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - other := ig.Next() - - // Provide 10 blocks on Peer A - blks := bgen.Blocks(10) - for _, block := range blks { - addBlock(t, ctx, other, block) - } - - var cids []cid.Cid - for _, blk := range blks { - cids = append(cids, blk.Cid()) - } - - // Request blocks with Peer B - // Note: Peer A and Peer B are not initially connected, so this tests - // that Peer B will search for and find Peer A - thisNode := ig.Next() - ses := thisNode.Exchange.NewSession(ctx).(*session.Session) - ses.SetBaseTickDelay(time.Millisecond * 10) - - ch, err := ses.GetBlocks(ctx, cids) - if err != nil { - t.Fatal(err) - } - - var got []blocks.Block - for b := range ch { - got = append(got, b) - } - if err := assertBlockLists(got, blks); err != nil { - t.Fatal(err) - } -} - -func TestFetchAfterDisconnect(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{ - bitswap.ProviderSearchDelay(10 * time.Millisecond), - bitswap.RebroadcastDelay(delay.Fixed(15 * time.Millisecond)), - }) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - inst := ig.Instances(2) - peerA := inst[0] - peerB := inst[1] - - // Provide 5 blocks on Peer A - blks := bgen.Blocks(10) - var cids []cid.Cid - for _, blk := range blks { - cids = append(cids, blk.Cid()) - } - - firstBlks := blks[:5] - for _, block := range firstBlks { - addBlock(t, ctx, peerA, block) - } - - // Request all blocks with Peer B - ses := peerB.Exchange.NewSession(ctx).(*session.Session) - ses.SetBaseTickDelay(time.Millisecond * 10) - - ch, err := ses.GetBlocks(ctx, cids) - if err != nil { - t.Fatal(err) - } - - // Should get first 5 blocks - var got []blocks.Block - for i := 0; i < 5; i++ { - b := <-ch - got = append(got, b) - } - - if err := assertBlockLists(got, blks[:5]); err != nil { - t.Fatal(err) - } - - // Break connection - err = peerA.Adapter.DisconnectFrom(ctx, peerB.Peer) - if err != nil { - t.Fatal(err) - } - - time.Sleep(20 * time.Millisecond) - - // Provide remaining blocks - lastBlks := blks[5:] - for _, block := range lastBlks { - addBlock(t, ctx, peerA, block) - } - - // Peer B should call FindProviders() and find Peer A - - // Should 
get last 5 blocks - for i := 0; i < 5; i++ { - select { - case b := <-ch: - got = append(got, b) - case <-ctx.Done(): - } - } - - if err := assertBlockLists(got, blks); err != nil { - t.Fatal(err) - } -} - -func TestInterestCacheOverflow(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - blks := bgen.Blocks(2049) - inst := ig.Instances(2) - - a := inst[0] - b := inst[1] - - ses := a.Exchange.NewSession(ctx) - zeroch, err := ses.GetBlocks(ctx, []cid.Cid{blks[0].Cid()}) - if err != nil { - t.Fatal(err) - } - - var restcids []cid.Cid - for _, blk := range blks[1:] { - restcids = append(restcids, blk.Cid()) - } - - restch, err := ses.GetBlocks(ctx, restcids) - if err != nil { - t.Fatal(err) - } - - // wait to ensure that all the above cids were added to the sessions cache - time.Sleep(time.Millisecond * 50) - - addBlock(t, ctx, b, blks[0]) - - select { - case blk, ok := <-zeroch: - if ok && blk.Cid().Equals(blks[0].Cid()) { - // success! - } else { - t.Fatal("failed to get the block") - } - case <-restch: - t.Fatal("should not get anything on restch") - case <-time.After(time.Second * 5): - t.Fatal("timed out waiting for block") - } -} - -func TestPutAfterSessionCacheEvict(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - blks := bgen.Blocks(2500) - inst := ig.Instances(1) - - a := inst[0] - - ses := a.Exchange.NewSession(ctx) - - var allcids []cid.Cid - for _, blk := range blks[1:] { - allcids = append(allcids, blk.Cid()) - } - - blkch, err := ses.GetBlocks(ctx, allcids) - if err != nil { - t.Fatal(err) - } - - // wait to ensure that all the above cids were added to the sessions cache - time.Sleep(time.Millisecond * 50) - - addBlock(t, ctx, a, blks[17]) - - select { - case <-blkch: - case <-time.After(time.Millisecond * 50): - t.Fatal("timed out waiting for block") - } -} - -func TestMultipleSessions(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - blk := bgen.Blocks(1)[0] - inst := ig.Instances(2) - - a := inst[0] - b := inst[1] - - ctx1, cancel1 := context.WithCancel(ctx) - ses := a.Exchange.NewSession(ctx1) - - blkch, err := ses.GetBlocks(ctx, []cid.Cid{blk.Cid()}) - if err != nil { - t.Fatal(err) - } - cancel1() - - ses2 := a.Exchange.NewSession(ctx) - blkch2, err := ses2.GetBlocks(ctx, []cid.Cid{blk.Cid()}) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Millisecond * 10) - addBlock(t, ctx, b, blk) - - select { - case <-blkch2: - case <-time.After(time.Second * 20): - t.Fatal("bad juju") - } - _ = blkch -} - -func TestWantlistClearsOnCancel(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) - defer ig.Close() - bgen := blocksutil.NewBlockGenerator() - - blks := bgen.Blocks(10) - var cids []cid.Cid - for _, blk := range blks { - cids = append(cids, blk.Cid()) - } - - inst := ig.Instances(1) - - a := inst[0] - - ctx1, cancel1 := 
context.WithCancel(ctx) - ses := a.Exchange.NewSession(ctx1) - - _, err := ses.GetBlocks(ctx, cids) - if err != nil { - t.Fatal(err) - } - cancel1() - - if err := tu.WaitFor(ctx, func() error { - if len(a.Exchange.GetWantlist()) > 0 { - return fmt.Errorf("expected empty wantlist") - } - return nil - }); err != nil { - t.Fatal(err) - } -} diff --git a/client/client.go b/client/client.go index ca94da8c..1eb9940d 100644 --- a/client/client.go +++ b/client/client.go @@ -4,476 +4,61 @@ package client import ( "context" - "errors" - "sync" "time" delay "github.com/ipfs/go-ipfs-delay" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" - bsmq "github.com/ipfs/go-bitswap/client/internal/messagequeue" - "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bspqm "github.com/ipfs/go-bitswap/client/internal/providerquerymanager" - bssession "github.com/ipfs/go-bitswap/client/internal/session" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - bssm "github.com/ipfs/go-bitswap/client/internal/sessionmanager" - bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/go-bitswap/internal" - "github.com/ipfs/go-bitswap/internal/defaults" - bsmsg "github.com/ipfs/go-bitswap/message" - bmetrics "github.com/ipfs/go-bitswap/metrics" bsnet "github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-bitswap/tracer" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - logging "github.com/ipfs/go-log" - "github.com/ipfs/go-metrics-interface" - process "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" - "github.com/libp2p/go-libp2p/core/peer" + libipfs "github.com/ipfs/go-libipfs/bitswap/client" ) -var log = logging.Logger("bitswap-client") - // Option defines the functional option type that can be used to configure // bitswap instances -type Option func(*Client) +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.Option instead +type Option = libipfs.Option // ProviderSearchDelay overwrites the global provider search delay +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.ProviderSearchDelay instead func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { - return func(bs *Client) { - bs.provSearchDelay = newProvSearchDelay - } + return libipfs.ProviderSearchDelay(newProvSearchDelay) } // RebroadcastDelay overwrites the global provider rebroadcast delay +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.RebroadcastDelay instead func RebroadcastDelay(newRebroadcastDelay delay.D) Option { - return func(bs *Client) { - bs.rebroadcastDelay = newRebroadcastDelay - } + return libipfs.RebroadcastDelay(newRebroadcastDelay) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.SetSimulateDontHavesOnTimeout instead func SetSimulateDontHavesOnTimeout(send bool) Option { - return func(bs *Client) { - bs.simulateDontHavesOnTimeout = send - } + return libipfs.SetSimulateDontHavesOnTimeout(send) } // Configures the Client to use given tracer. // This provides methods to access all messages sent and received by the Client. // This interface can be used to implement various statistics (this is original intent). 
+// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.WithTracer instead func WithTracer(tap tracer.Tracer) Option { - return func(bs *Client) { - bs.tracer = tap - } + return libipfs.WithTracer(tap) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.WithBlockReceivedNotifier instead func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { - return func(bs *Client) { - bs.blockReceivedNotifier = brn - } + return libipfs.WithBlockReceivedNotifier(brn) } -type BlockReceivedNotifier interface { - // ReceivedBlocks notifies the decision engine that a peer is well-behaving - // and gave us useful data, potentially increasing its score and making us - // send them more data in exchange. - ReceivedBlocks(peer.ID, []blocks.Block) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.BlockReceivedNotifier instead +type BlockReceivedNotifier = libipfs.BlockReceivedNotifier // New initializes a Bitswap client that runs until client.Close is called. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.New instead func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Client { - // important to use provided parent context (since it may include important - // loggable data). It's probably not a good idea to allow bitswap to be - // coupled to the concerns of the ipfs daemon in this way. - // - // FIXME(btc) Now that bitswap manages itself using a process, it probably - // shouldn't accept a context anymore. Clients should probably use Close() - // exclusively. We should probably find another way to share logging data - ctx, cancelFunc := context.WithCancel(parent) - - px := process.WithTeardown(func() error { - return nil - }) - - // onDontHaveTimeout is called when a want-block is sent to a peer that - // has an old version of Bitswap that doesn't support DONT_HAVE messages, - // or when no response is received within a timeout. 
- var sm *bssm.SessionManager - var bs *Client - onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { - // Simulate a message arriving with DONT_HAVEs - if bs.simulateDontHavesOnTimeout { - sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) - } - } - peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { - return bsmq.New(ctx, p, network, onDontHaveTimeout) - } - - sim := bssim.New() - bpm := bsbpm.New() - pm := bspm.New(ctx, peerQueueFactory, network.Self()) - pqm := bspqm.New(ctx, network) - - sessionFactory := func( - sessctx context.Context, - sessmgr bssession.SessionManager, - id uint64, - spm bssession.SessionPeerManager, - sim *bssim.SessionInterestManager, - pm bssession.PeerManager, - bpm *bsbpm.BlockPresenceManager, - notif notifications.PubSub, - provSearchDelay time.Duration, - rebroadcastDelay delay.D, - self peer.ID) bssm.Session { - return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) - } - sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { - return bsspm.New(id, network.ConnectionManager()) - } - notif := notifications.New() - sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) - - bs = &Client{ - blockstore: bstore, - network: network, - process: px, - pm: pm, - pqm: pqm, - sm: sm, - sim: sim, - notif: notif, - counters: new(counters), - dupMetric: bmetrics.DupHist(ctx), - allMetric: bmetrics.AllHist(ctx), - provSearchDelay: defaults.ProvSearchDelay, - rebroadcastDelay: delay.Fixed(time.Minute), - simulateDontHavesOnTimeout: true, - } - - // apply functional options before starting and running bitswap - for _, option := range options { - option(bs) - } - - bs.pqm.Startup() - - // bind the context and process. - // do it over here to avoid closing before all setup is done. - go func() { - <-px.Closing() // process closes first - sm.Shutdown() - cancelFunc() - notif.Shutdown() - }() - procctx.CloseAfterContext(px, ctx) // parent cancelled first - - return bs + return libipfs.New(parent, network, bstore, options...) } // Client instances implement the bitswap protocol. 
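
The constructor and functional options above are easiest to see in use. The sketch below is not part of this diff: the `newClient` helper is hypothetical, the option values are illustrative rather than the package defaults, and it assumes the caller already has a `bsnet.BitSwapNetwork` and a `blockstore.Blockstore` (their construction is out of scope). Import paths are the ones used elsewhere in this file. The original `Client` struct that these options used to configure continues below.

```go
package example

import (
	"context"
	"time"

	"github.com/ipfs/go-bitswap/client"
	bsnet "github.com/ipfs/go-bitswap/network"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	delay "github.com/ipfs/go-ipfs-delay"
)

// newClient is a hypothetical helper: the caller supplies an already
// constructed network and blockstore, and the functional options are applied
// by New in the order given.
func newClient(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore) *client.Client {
	return client.New(ctx, network, bstore,
		// How long a session waits before asking the routing system for providers.
		client.ProviderSearchDelay(time.Second),
		// How often to rebroadcast provider requests for long-lived wants.
		client.RebroadcastDelay(delay.Fixed(time.Minute)),
		// Simulate a DONT_HAVE when a peer does not answer a want-block in time.
		client.SetSimulateDontHavesOnTimeout(true),
	)
}
```
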
-type Client struct { - pm *bspm.PeerManager - - // the provider query manager manages requests to find providers - pqm *bspqm.ProviderQueryManager - - // network delivers messages on behalf of the session - network bsnet.BitSwapNetwork - - // blockstore is the local database - // NB: ensure threadsafety - blockstore blockstore.Blockstore - - // manages channels of outgoing blocks for sessions - notif notifications.PubSub - - process process.Process - - // Counters for various statistics - counterLk sync.Mutex - counters *counters - - // Metrics interface metrics - dupMetric metrics.Histogram - allMetric metrics.Histogram - - // External statistics interface - tracer tracer.Tracer - - // the SessionManager routes requests to interested sessions - sm *bssm.SessionManager - - // the SessionInterestManager keeps track of which sessions are interested - // in which CIDs - sim *bssim.SessionInterestManager - - // how long to wait before looking for providers in a session - provSearchDelay time.Duration - - // how often to rebroadcast providing requests to find more optimized providers - rebroadcastDelay delay.D - - blockReceivedNotifier BlockReceivedNotifier - - // whether we should actually simulate dont haves on request timeout - simulateDontHavesOnTimeout bool -} - -type counters struct { - blocksRecvd uint64 - dupBlocksRecvd uint64 - dupDataRecvd uint64 - dataRecvd uint64 - messagesRecvd uint64 -} - -// GetBlock attempts to retrieve a particular block from peers within the -// deadline enforced by the context. -func (bs *Client) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String()))) - defer span.End() - return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks) -} - -// GetBlocks returns a channel where the caller may receive blocks that -// correspond to the provided |keys|. Returns an error if BitSwap is unable to -// begin this request within the deadline enforced by the context. -// -// NB: Your request remains open until the context expires. To conserve -// resources, provide a context with a reasonably short deadline (ie. not one -// that lasts throughout the lifetime of the server) -func (bs *Client) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) - defer span.End() - session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) - return session.GetBlocks(ctx, keys) -} - -// NotifyNewBlocks announces the existence of blocks to this bitswap service. -// Bitswap itself doesn't store new blocks. It's the caller responsibility to ensure -// that those blocks are available in the blockstore before calling this function. -func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { - ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks") - defer span.End() - - select { - case <-bs.process.Closing(): - return errors.New("bitswap is closed") - default: - } - - blkCids := make([]cid.Cid, len(blks)) - for i, blk := range blks { - blkCids[i] = blk.Cid() - } - - // Send all block keys (including duplicates) to any sessions that want them. - // (The duplicates are needed by sessions for accounting purposes) - bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil) - - // Publish the block to any Bitswap clients that had requested blocks. 
- // (the sessions use this pubsub mechanism to inform clients of incoming - // blocks) - bs.notif.Publish(blks...) - - return nil -} - -// receiveBlocksFrom process blocks received from the network -func (bs *Client) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { - select { - case <-bs.process.Closing(): - return errors.New("bitswap is closed") - default: - } - - wanted, notWanted := bs.sim.SplitWantedUnwanted(blks) - for _, b := range notWanted { - log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) - } - - allKs := make([]cid.Cid, 0, len(blks)) - for _, b := range blks { - allKs = append(allKs, b.Cid()) - } - - // Inform the PeerManager so that we can calculate per-peer latency - combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) - combined = append(combined, allKs...) - combined = append(combined, haves...) - combined = append(combined, dontHaves...) - bs.pm.ResponseReceived(from, combined) - - // Send all block keys (including duplicates) to any sessions that want them for accounting purpose. - bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) - - if bs.blockReceivedNotifier != nil { - bs.blockReceivedNotifier.ReceivedBlocks(from, wanted) - } - - // Publish the block to any Bitswap clients that had requested blocks. - // (the sessions use this pubsub mechanism to inform clients of incoming - // blocks) - for _, b := range wanted { - bs.notif.Publish(b) - } - - for _, b := range wanted { - log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) - } - - return nil -} - -// ReceiveMessage is called by the network interface when a new message is -// received. -func (bs *Client) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { - bs.counterLk.Lock() - bs.counters.messagesRecvd++ - bs.counterLk.Unlock() - - if bs.tracer != nil { - bs.tracer.MessageReceived(p, incoming) - } - - iblocks := incoming.Blocks() - - if len(iblocks) > 0 { - bs.updateReceiveCounters(iblocks) - for _, b := range iblocks { - log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) - } - } - - haves := incoming.Haves() - dontHaves := incoming.DontHaves() - if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { - // Process blocks - err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) - if err != nil { - log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) - return - } - } -} - -func (bs *Client) updateReceiveCounters(blocks []blocks.Block) { - // Check which blocks are in the datastore - // (Note: any errors from the blockstore are simply logged out in - // blockstoreHas()) - blocksHas := bs.blockstoreHas(blocks) - - bs.counterLk.Lock() - defer bs.counterLk.Unlock() - - // Do some accounting for each block - for i, b := range blocks { - has := blocksHas[i] - - blkLen := len(b.RawData()) - bs.allMetric.Observe(float64(blkLen)) - if has { - bs.dupMetric.Observe(float64(blkLen)) - } - - c := bs.counters - - c.blocksRecvd++ - c.dataRecvd += uint64(blkLen) - if has { - c.dupBlocksRecvd++ - c.dupDataRecvd += uint64(blkLen) - } - } -} - -func (bs *Client) blockstoreHas(blks []blocks.Block) []bool { - res := make([]bool, len(blks)) - - wg := sync.WaitGroup{} - for i, block := range blks { - wg.Add(1) - go func(i int, b blocks.Block) { - defer wg.Done() - - has, err := bs.blockstore.Has(context.TODO(), b.Cid()) - if err != nil { - log.Infof("blockstore.Has error: %s", err) - has = false - } - - res[i] = has - }(i, block) - } - wg.Wait() - - return res -} - -// 
PeerConnected is called by the network interface -// when a peer initiates a new connection to bitswap. -func (bs *Client) PeerConnected(p peer.ID) { - bs.pm.Connected(p) -} - -// PeerDisconnected is called by the network interface when a peer -// closes a connection -func (bs *Client) PeerDisconnected(p peer.ID) { - bs.pm.Disconnected(p) -} - -// ReceiveError is called by the network interface when an error happens -// at the network layer. Currently just logs error. -func (bs *Client) ReceiveError(err error) { - log.Infof("Bitswap Client ReceiveError: %s", err) - // TODO log the network error - // TODO bubble the network error up to the parent context/error logger -} - -// Close is called to shutdown the Client -func (bs *Client) Close() error { - return bs.process.Close() -} - -// GetWantlist returns the current local wantlist (both want-blocks and -// want-haves). -func (bs *Client) GetWantlist() []cid.Cid { - return bs.pm.CurrentWants() -} - -// GetWantBlocks returns the current list of want-blocks. -func (bs *Client) GetWantBlocks() []cid.Cid { - return bs.pm.CurrentWantBlocks() -} - -// GetWanthaves returns the current list of want-haves. -func (bs *Client) GetWantHaves() []cid.Cid { - return bs.pm.CurrentWantHaves() -} - -// IsOnline is needed to match go-ipfs-exchange-interface -func (bs *Client) IsOnline() bool { - return true -} - -// NewSession generates a new Bitswap session. You should use this, rather -// that calling Client.GetBlocks, any time you intend to do several related -// block requests in a row. The session returned will have it's own GetBlocks -// method, but the session will use the fact that the requests are related to -// be more efficient in its requests to peers. If you are using a session -// from go-blockservice, it will create a bitswap session automatically. 
-func (bs *Client) NewSession(ctx context.Context) exchange.Fetcher { - ctx, span := internal.StartSpan(ctx, "NewSession") - defer span.End() - return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client.Client instead +type Client = libipfs.Client diff --git a/client/docs/go-bitswap.png b/client/docs/go-bitswap.png deleted file mode 100644 index 805bf656..00000000 Binary files a/client/docs/go-bitswap.png and /dev/null differ diff --git a/client/docs/go-bitswap.puml b/client/docs/go-bitswap.puml deleted file mode 100644 index af9134d7..00000000 --- a/client/docs/go-bitswap.puml +++ /dev/null @@ -1,49 +0,0 @@ -@startuml Bitswap Components - -node "Top Level Interface" { - [Bitswap] -} - -node "Sending Blocks" { - [Bitswap] --* [Engine] - [Engine] -left-* [Ledger] - [Engine] -right-* [PeerTaskQueue] - [Engine] --> [TaskWorker (workers.go)] -} - -node "Providing" { - [Bitswap] --* [Provide Collector (workers.go)] - [Provide Collector (workers.go)] --* [Provide Worker (workers.go)] -} - -node "Finding Providers" { - [Bitswap] --* [ProvideQueryManager] -} - -node "Sessions (smart requests)" { - [Bitswap] --* [SessionManager] - [SessionManager] --> [SessionInterestManager] - [SessionManager] --o [Session] - [SessionManager] --> [BlockPresenceManager] - [Session] --* [sessionWantSender] - [Session] --* [SessionPeerManager] - [Session] --> [ProvideQueryManager] - [Session] --* [sessionWants] - [Session] --> [SessionInterestManager] - [sessionWantSender] --> [BlockPresenceManager] -} - -node "Requesting Blocks" { - [SessionManager] --> [PeerManager] - [sessionWantSender] --> [PeerManager] - [PeerManager] --* [MessageQueue] -} - -node "Network" { - [BitSwapNetwork] - [MessageQueue] --> [BitSwapNetwork] - [ProvideQueryManager] --> [BitSwapNetwork] - [TaskWorker (workers.go)] --> [BitSwapNetwork] - [Provide Worker (workers.go)] --> [BitSwapNetwork] -} -@enduml \ No newline at end of file diff --git a/client/docs/how-bitswap-works.md b/client/docs/how-bitswap-works.md deleted file mode 100644 index 303b0576..00000000 --- a/client/docs/how-bitswap-works.md +++ /dev/null @@ -1,143 +0,0 @@ -How Bitswap Works -================= - -When a client requests blocks, Bitswap sends the CID of those blocks to its peers as "wants". When Bitswap receives a "want" from a peer, it responds with the corresponding block. - -### Requesting Blocks - -#### Sessions - -Bitswap Sessions allow the client to make related requests to the same group of peers. For example typically requests to fetch all the blocks in a file would be made with a single session. - -#### Discovery - -To discover which peers have a block, Bitswap broadcasts a `want-have` message to all peers it is connected to asking if they have the block. - -Any peers that have the block respond with a `HAVE` message. They are added to the Session. - -If no connected peers have the block, Bitswap queries the DHT to find peers that have the block. - -### Wants - -When the client requests a block, Bitswap sends a `want-have` message with the block CID to all peers in the Session to ask who has the block. - -Bitswap simultaneously sends a `want-block` message to one of the peers in the Session to request the block. If the peer does not have the block, it responds with a `DONT_HAVE` message. In that case Bitswap selects another peer and sends the `want-block` to that peer. 
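
To make the session and want flow above concrete, here is a small sketch (not part of the original document). The `fetch` helper is hypothetical and assumes a Bitswap client that is already constructed and connected; it mirrors how the deleted tests in this diff drive sessions: `NewSession` groups the related requests, and the channel returned by `GetBlocks` delivers blocks as peers respond, closing once everything has arrived or the context ends.

```go
package example

import (
	"context"

	"github.com/ipfs/go-bitswap/client"
	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

// fetch is a hypothetical helper: it creates a session so that all of the
// requested CIDs share discovered peers, then drains the block channel.
func fetch(ctx context.Context, bs *client.Client, cids []cid.Cid) ([]blocks.Block, error) {
	ses := bs.NewSession(ctx)

	ch, err := ses.GetBlocks(ctx, cids)
	if err != nil {
		return nil, err
	}

	var got []blocks.Block
	for b := range ch { // channel closes when all blocks arrive or ctx ends
		got = append(got, b)
	}
	return got, nil
}
```
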
- -If no peers have the block, Bitswap broadcasts a `want-have` to all connected peers, and queries the DHT to find peers that have the block. - -#### Peer Selection - -Bitswap uses a probabilistic algorithm to select which peer to send `want-block` to, favouring peers that -- sent `HAVE` for the block -- were discovered as providers of the block in the DHT -- were first to send blocks to previous session requests - -The selection algorithm includes some randomness so as to allow peers that are discovered later, but are more responsive, to rise in the ranking. - -#### Periodic Search Widening - -Periodically the Bitswap Session selects a random CID from the list of "pending wants" (wants that have been sent but for which no block has been received). Bitswap broadcasts a `want-have` to all connected peers and queries the DHT for the CID. - -### Serving Blocks - -#### Processing Requests - -When Bitswap receives a `want-have` it checks if the block is in the local blockstore. - -If the block is in the local blockstore Bitswap responds with `HAVE`. If the block is small Bitswap sends the block itself instead of `HAVE`. - -If the block is not in the local blockstore, Bitswap checks the `send-dont-have` flag on the request. If `send-dont-have` is true, Bitswap sends `DONT_HAVE`. Otherwise it does not respond. - -#### Processing Incoming Blocks - -When Bitswap receives a block, it checks to see if any peers sent `want-have` or `want-block` for the block. If so it sends `HAVE` or the block itself to those peers. - -#### Priority - -Bitswap keeps requests from each peer in separate queues, ordered by the priority specified in the request message. - -To select which peer to send the next response to, Bitswap chooses the peer with the least amount of data in its send queue. That way it will tend to "keep peers busy" by always keeping some data in each peer's send queue. - - -Implementation -============== - -![Bitswap Components](./go-bitswap.png) - -### Bitswap - -The Bitswap class receives incoming messages and implements the Exchange API. - -When a message is received, Bitswap -- Records some statistics about the message -- Informs the Engine of any new wants - So that the Engine can send responses to the wants -- Informs the Engine of any received blocks - So that the Engine can send the received blocks to any peers that want them -- Informs the SessionManager of received blocks, HAVEs and DONT_HAVEs - So that the SessionManager can inform interested sessions - -When the client makes an API call, Bitswap creates a new Session and calls the corresponding method (eg `GetBlocks()`). - -### Sending Blocks - -When the Engine is informed of new wants it -- Adds the wants to the Ledger (peer A wants block with CID Qmhash...) -- Checks the blockstore for the corresponding blocks, and adds a task to the PeerTaskQueue - - If the blockstore does not have a wanted block, adds a `DONT_HAVE` task - - If the blockstore has the block - - for a `want-have` adds a `HAVE` task - - for a `want-block` adds a `block` task - -When the Engine is informed of new blocks it checks the Ledger to see if any peers want information about those blocks. -- For each block - - For each peer that sent a `want-have` for the corresponding block - Adds a `HAVE` task to the PeerTaskQueue - - For each peer that sent a `want-block` for the corresponding block - Adds a `block` task to the PeerTaskQueue - -The Engine periodically pops tasks off the PeerTaskQueue, and creates a message with `blocks`, `HAVEs` and `DONT_HAVEs`. 
-The PeerTaskQueue prioritizes tasks such that the peers with the least amount of data in their send queue are highest priority, so as to "keep peers busy". - -### Requesting Blocks - -When the SessionManager is informed of a new message, it -- informs the BlockPresenceManager - The BlockPresenceManager keeps track of which peers have sent HAVES and DONT_HAVEs for each block -- informs the Sessions that are interested in the received blocks and wants -- informs the PeerManager of received blocks - The PeerManager checks if any wants were send to a peer for the received blocks. If so it sends a `CANCEL` message to those peers. - -### Sessions - -The Session starts in "discovery" mode. This means it doesn't have any peers yet, and needs to discover which peers have the blocks it wants. - -When the client initially requests blocks from a Session, the Session -- informs the SessionInterestManager that it is interested in the want -- informs the sessionWantManager of the want -- tells the PeerManager to broadcast a `want-have` to all connected peers so as to discover which peers have the block -- queries the ProviderQueryManager to discover which peers have the block - -When the session receives a message with `HAVE` or a `block`, it informs the SessionPeerManager. The SessionPeerManager keeps track of all peers in the session. -When the session receives a message with a `block` it informs the SessionInterestManager. - -Once the session has peers it is no longer in "discovery" mode. When the client requests subsequent blocks the Session informs the sessionWantSender. The sessionWantSender tells the PeerManager to send `want-have` and `want-block` to peers in the session. - -For each block that the Session wants, the sessionWantSender decides which peer is most likely to have a block by checking with the BlockPresenceManager which peers have sent a `HAVE` for the block. If no peers or multiple peers have sent `HAVE`, a peer is chosen probabilistically according to which how many times each peer was first to send a block in response to previous wants requested by the Session. The sessionWantSender sends a single "optimistic" `want-block` to the chosen peer, and sends `want-have` to all other peers in the Session. -When a peer responds with `DONT_HAVE`, the Session sends `want-block` to the next best peer, and so on until the block is received. - -### PeerManager - -The PeerManager creates a MessageQueue for each peer that connects to Bitswap. It remembers which `want-have` / `want-block` has been sent to each peer, and directs any new wants to the correct peer. -The MessageQueue groups together wants into a message, and sends the message to the peer. It monitors for timeouts and simulates a `DONT_HAVE` response if a peer takes too long to respond. - -### Finding Providers - -When bitswap can't find a connected peer who already has the block it wants, it falls back to querying a content routing system (a DHT in IPFS's case) to try to locate a peer with the block. - -Bitswap routes these requests through the ProviderQueryManager system, which rate-limits these requests and also deduplicates in-process requests. - -### Providing - -As a bitswap client receives blocks, by default it announces them on the provided content routing system (again, a DHT in most cases). This behaviour can be disabled by passing `bitswap.ProvideEnabled(false)` as a parameter when initializing Bitswap. 
IPFS currently has its own experimental provider system ([go-ipfs-provider](https://github.com/ipfs/go-ipfs-provider)) which will eventually replace Bitswap's system entirely. - diff --git a/client/internal/blockpresencemanager/blockpresencemanager.go b/client/internal/blockpresencemanager/blockpresencemanager.go deleted file mode 100644 index 1b76acc5..00000000 --- a/client/internal/blockpresencemanager/blockpresencemanager.go +++ /dev/null @@ -1,121 +0,0 @@ -package blockpresencemanager - -import ( - "sync" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// BlockPresenceManager keeps track of which peers have indicated that they -// have or explicitly don't have a block -type BlockPresenceManager struct { - sync.RWMutex - presence map[cid.Cid]map[peer.ID]bool -} - -func New() *BlockPresenceManager { - return &BlockPresenceManager{ - presence: make(map[cid.Cid]map[peer.ID]bool), - } -} - -// ReceiveFrom is called when a peer sends us information about which blocks -// it has and does not have -func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { - bpm.Lock() - defer bpm.Unlock() - - for _, c := range haves { - bpm.updateBlockPresence(p, c, true) - } - for _, c := range dontHaves { - bpm.updateBlockPresence(p, c, false) - } -} - -func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { - _, ok := bpm.presence[c] - if !ok { - bpm.presence[c] = make(map[peer.ID]bool) - } - - // Make sure not to change HAVE to DONT_HAVE - has, pok := bpm.presence[c][p] - if pok && has { - return - } - bpm.presence[c][p] = present -} - -// PeerHasBlock indicates whether the given peer has sent a HAVE for the given -// cid -func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { - bpm.RLock() - defer bpm.RUnlock() - - return bpm.presence[c][p] -} - -// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE -// for the given cid -func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { - bpm.RLock() - defer bpm.RUnlock() - - have, known := bpm.presence[c][p] - return known && !have -} - -// Filters the keys such that all the given peers have received a DONT_HAVE -// for a key. -// This allows us to know if we've exhausted all possibilities of finding -// the key with the peers we know about. 
-func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { - bpm.RLock() - defer bpm.RUnlock() - - var res []cid.Cid - for _, c := range ks { - if bpm.allDontHave(peers, c) { - res = append(res, c) - } - } - return res -} - -func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { - // Check if we know anything about the cid's block presence - ps, cok := bpm.presence[c] - if !cok { - return false - } - - // Check if we explicitly know that all the given peers do not have the cid - for _, p := range peers { - if has, pok := ps[p]; !pok || has { - return false - } - } - return true -} - -// RemoveKeys cleans up the given keys from the block presence map -func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { - bpm.Lock() - defer bpm.Unlock() - - for _, c := range ks { - delete(bpm.presence, c) - } -} - -// HasKey indicates whether the BlockPresenceManager is tracking the given key -// (used by the tests) -func (bpm *BlockPresenceManager) HasKey(c cid.Cid) bool { - bpm.Lock() - defer bpm.Unlock() - - _, ok := bpm.presence[c] - return ok -} diff --git a/client/internal/blockpresencemanager/blockpresencemanager_test.go b/client/internal/blockpresencemanager/blockpresencemanager_test.go deleted file mode 100644 index e6adfc61..00000000 --- a/client/internal/blockpresencemanager/blockpresencemanager_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package blockpresencemanager - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - peer "github.com/libp2p/go-libp2p/core/peer" - - cid "github.com/ipfs/go-cid" -) - -const ( - expHasFalseMsg = "Expected PeerHasBlock to return false" - expHasTrueMsg = "Expected PeerHasBlock to return true" - expDoesNotHaveFalseMsg = "Expected PeerDoesNotHaveBlock to return false" - expDoesNotHaveTrueMsg = "Expected PeerDoesNotHaveBlock to return true" -) - -func TestBlockPresenceManager(t *testing.T) { - bpm := New() - - p := testutil.GeneratePeers(1)[0] - cids := testutil.GenerateCids(2) - c0 := cids[0] - c1 := cids[1] - - // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should - // return false - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - - // HAVE cid0 / DONT_HAVE cid1 - bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) - - // Peer has received HAVE for cid0 - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - - // Peer has received DONT_HAVE for cid1 - if !bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveTrueMsg) - } - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } - - // HAVE cid1 / DONT_HAVE cid0 - bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) - - // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } - - // HAVE cid1 should over-write earlier DONT_HAVE cid1 - if !bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasTrueMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - - // Remove cid0 - bpm.RemoveKeys([]cid.Cid{c0}) - - // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should - // return false - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - - // Remove cid1 - bpm.RemoveKeys([]cid.Cid{c1}) - - // 
Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should - // return false - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } -} - -func TestAddRemoveMulti(t *testing.T) { - bpm := New() - - peers := testutil.GeneratePeers(2) - p0 := peers[0] - p1 := peers[1] - cids := testutil.GenerateCids(3) - c0 := cids[0] - c1 := cids[1] - c2 := cids[2] - - // p0: HAVE cid0, cid1 / DONT_HAVE cid1, cid2 - // p1: HAVE cid1, cid2 / DONT_HAVE cid0 - bpm.ReceiveFrom(p0, []cid.Cid{c0, c1}, []cid.Cid{c1, c2}) - bpm.ReceiveFrom(p1, []cid.Cid{c1, c2}, []cid.Cid{c0}) - - // Peer 0 should end up with - // - HAVE cid0 - // - HAVE cid1 - // - DONT_HAVE cid2 - if !bpm.PeerHasBlock(p0, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerHasBlock(p0, c1) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p0, c2) { - t.Fatal(expDoesNotHaveTrueMsg) - } - - // Peer 1 should end up with - // - HAVE cid1 - // - HAVE cid2 - // - DONT_HAVE cid0 - if !bpm.PeerHasBlock(p1, c1) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerHasBlock(p1, c2) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p1, c0) { - t.Fatal(expDoesNotHaveTrueMsg) - } - - // Remove cid1 and cid2. Should end up with - // Peer 0: HAVE cid0 - // Peer 1: DONT_HAVE cid0 - bpm.RemoveKeys([]cid.Cid{c1, c2}) - if !bpm.PeerHasBlock(p0, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p1, c0) { - t.Fatal(expDoesNotHaveTrueMsg) - } - - // The other keys should have been cleared, so both HasBlock() and - // DoesNotHaveBlock() should return false - if bpm.PeerHasBlock(p0, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p0, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p0, c2) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p0, c2) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p1, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p1, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p1, c2) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p1, c2) { - t.Fatal(expDoesNotHaveFalseMsg) - } -} - -func TestAllPeersDoNotHaveBlock(t *testing.T) { - bpm := New() - - peers := testutil.GeneratePeers(3) - p0 := peers[0] - p1 := peers[1] - p2 := peers[2] - - cids := testutil.GenerateCids(3) - c0 := cids[0] - c1 := cids[1] - c2 := cids[2] - - // c0 c1 c2 - // p0 ? N N - // p1 N Y ? 
- // p2 Y Y N - bpm.ReceiveFrom(p0, []cid.Cid{}, []cid.Cid{c1, c2}) - bpm.ReceiveFrom(p1, []cid.Cid{c1}, []cid.Cid{c0}) - bpm.ReceiveFrom(p2, []cid.Cid{c0, c1}, []cid.Cid{c2}) - - type testcase struct { - peers []peer.ID - ks []cid.Cid - exp []cid.Cid - } - - testcases := []testcase{ - {[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}}, - {[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}}, - {[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}}, - - {[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}}, - {[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}}, - {[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}}, - - {[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}}, - {[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, - {[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, - - // p0 recieved DONT_HAVE for c1 & c2 (but not for c0) - {[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, - {[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, - // Both p0 and p2 received DONT_HAVE for c2 - {[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}}, - {[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, - } - - for i, tc := range testcases { - if !testutil.MatchKeysIgnoreOrder( - bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), - tc.exp, - ) { - t.Fatalf("test case %d failed: expected matching keys", i) - } - } -} diff --git a/client/internal/getter/getter.go b/client/internal/getter/getter.go deleted file mode 100644 index 5a58e187..00000000 --- a/client/internal/getter/getter.go +++ /dev/null @@ -1,138 +0,0 @@ -package getter - -import ( - "context" - "errors" - - "github.com/ipfs/go-bitswap/client/internal" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - logging "github.com/ipfs/go-log" - - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" -) - -var log = logging.Logger("bitswap") - -// GetBlocksFunc is any function that can take an array of CIDs and return a -// channel of incoming blocks. -type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) - -// SyncGetBlock takes a block cid and an async function for getting several -// blocks that returns a channel, and uses that function to return the -// block syncronously. -func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { - p, span := internal.StartSpan(p, "Getter.SyncGetBlock") - defer span.End() - - if !k.Defined() { - log.Error("undefined cid in GetBlock") - return nil, ipld.ErrNotFound{Cid: k} - } - - // Any async work initiated by this function must end when this function - // returns. To ensure this, derive a new context. Note that it is okay to - // listen on parent in this scope, but NOT okay to pass |parent| to - // functions called by this one. Otherwise those functions won't return - // when this context's cancel func is executed. This is difficult to - // enforce. May this comment keep you safe. - ctx, cancel := context.WithCancel(p) - defer cancel() - - promise, err := gb(ctx, []cid.Cid{k}) - if err != nil { - return nil, err - } - - select { - case block, ok := <-promise: - if !ok { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - return nil, errors.New("promise channel was closed") - } - } - return block, nil - case <-p.Done(): - return nil, p.Err() - } -} - -// WantFunc is any function that can express a want for set of blocks. 
-type WantFunc func(context.Context, []cid.Cid) - -// AsyncGetBlocks take a set of block cids, a pubsub channel for incoming -// blocks, a want function, and a close function, and returns a channel of -// incoming blocks. -func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, - want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "Getter.AsyncGetBlocks") - defer span.End() - - // If there are no keys supplied, just return a closed channel - if len(keys) == 0 { - out := make(chan blocks.Block) - close(out) - return out, nil - } - - // Use a PubSub notifier to listen for incoming blocks for each key - remaining := cid.NewSet() - promise := notif.Subscribe(ctx, keys...) - for _, k := range keys { - log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k) - remaining.Add(k) - } - - // Send the want request for the keys to the network - want(ctx, keys) - - out := make(chan blocks.Block) - go handleIncoming(ctx, sessctx, remaining, promise, out, cwants) - return out, nil -} - -// Listens for incoming blocks, passing them to the out channel. -// If the context is cancelled or the incoming channel closes, calls cfun with -// any keys corresponding to blocks that were never received. -func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set, - in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { - - ctx, cancel := context.WithCancel(ctx) - - // Clean up before exiting this function, and call the cancel function on - // any remaining keys - defer func() { - cancel() - close(out) - // can't just defer this call on its own, arguments are resolved *when* the defer is created - cfun(remaining.Keys()) - }() - - for { - select { - case blk, ok := <-in: - // If the channel is closed, we're done (note that PubSub closes - // the channel once all the keys have been received) - if !ok { - return - } - - remaining.Remove(blk.Cid()) - select { - case out <- blk: - case <-ctx.Done(): - return - case <-sessctx.Done(): - return - } - case <-ctx.Done(): - return - case <-sessctx.Done(): - return - } - } -} diff --git a/client/internal/messagequeue/donthavetimeoutmgr.go b/client/internal/messagequeue/donthavetimeoutmgr.go deleted file mode 100644 index e1b42c42..00000000 --- a/client/internal/messagequeue/donthavetimeoutmgr.go +++ /dev/null @@ -1,398 +0,0 @@ -package messagequeue - -import ( - "context" - "sync" - "time" - - "github.com/benbjohnson/clock" - cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" -) - -const ( - // dontHaveTimeout is used to simulate a DONT_HAVE when communicating with - // a peer whose Bitswap client doesn't support the DONT_HAVE response, - // or when the peer takes too long to respond. - // If the peer doesn't respond to a want-block within the timeout, the - // local node assumes that the peer doesn't have the block. 
- dontHaveTimeout = 5 * time.Second - - // maxExpectedWantProcessTime is the maximum amount of time we expect a - // peer takes to process a want and initiate sending a response to us - maxExpectedWantProcessTime = 2 * time.Second - - // maxTimeout is the maximum allowed timeout, regardless of latency - maxTimeout = dontHaveTimeout + maxExpectedWantProcessTime - - // pingLatencyMultiplier is multiplied by the average ping time to - // get an upper bound on how long we expect to wait for a peer's response - // to arrive - pingLatencyMultiplier = 3 - - // messageLatencyAlpha is the alpha supplied to the message latency EWMA - messageLatencyAlpha = 0.5 - - // To give a margin for error, the timeout is calculated as - // messageLatencyMultiplier * message latency - messageLatencyMultiplier = 2 -) - -// PeerConnection is a connection to a peer that can be pinged, and the -// average latency measured -type PeerConnection interface { - // Ping the peer - Ping(context.Context) ping.Result - // The average latency of all pings - Latency() time.Duration -} - -// pendingWant keeps track of a want that has been sent and we're waiting -// for a response or for a timeout to expire -type pendingWant struct { - c cid.Cid - active bool - sent time.Time -} - -// dontHaveTimeoutMgr simulates a DONT_HAVE message if the peer takes too long -// to respond to a message. -// The timeout is based on latency - we start with a default latency, while -// we ping the peer to estimate latency. If we receive a response from the -// peer we use the response latency. -type dontHaveTimeoutMgr struct { - clock clock.Clock - ctx context.Context - shutdown func() - peerConn PeerConnection - onDontHaveTimeout func([]cid.Cid) - defaultTimeout time.Duration - maxTimeout time.Duration - pingLatencyMultiplier int - messageLatencyMultiplier int - maxExpectedWantProcessTime time.Duration - - // All variables below here must be protected by the lock - lk sync.RWMutex - // has the timeout manager started - started bool - // wants that are active (waiting for a response or timeout) - activeWants map[cid.Cid]*pendingWant - // queue of wants, from oldest to newest - wantQueue []*pendingWant - // time to wait for a response (depends on latency) - timeout time.Duration - // ewma of message latency (time from message sent to response received) - messageLatency *latencyEwma - // timer used to wait until want at front of queue expires - checkForTimeoutsTimer *clock.Timer - // used for testing -- timeoutsTriggered when a scheduled dont have timeouts were triggered - timeoutsTriggered chan struct{} -} - -// newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr -// onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) -func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), clock clock.Clock) *dontHaveTimeoutMgr { - return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, - pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock, nil) -} - -// newDontHaveTimeoutMgrWithParams is used by the tests -func newDontHaveTimeoutMgrWithParams( - pc PeerConnection, - onDontHaveTimeout func([]cid.Cid), - defaultTimeout time.Duration, - maxTimeout time.Duration, - pingLatencyMultiplier int, - messageLatencyMultiplier int, - maxExpectedWantProcessTime time.Duration, - clock clock.Clock, - timeoutsTriggered chan struct{}) *dontHaveTimeoutMgr { - - ctx, shutdown := context.WithCancel(context.Background()) - mqp := &dontHaveTimeoutMgr{ - clock: 
clock, - ctx: ctx, - shutdown: shutdown, - peerConn: pc, - activeWants: make(map[cid.Cid]*pendingWant), - timeout: defaultTimeout, - messageLatency: &latencyEwma{alpha: messageLatencyAlpha}, - defaultTimeout: defaultTimeout, - maxTimeout: maxTimeout, - pingLatencyMultiplier: pingLatencyMultiplier, - messageLatencyMultiplier: messageLatencyMultiplier, - maxExpectedWantProcessTime: maxExpectedWantProcessTime, - onDontHaveTimeout: onDontHaveTimeout, - timeoutsTriggered: timeoutsTriggered, - } - - return mqp -} - -// Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored -func (dhtm *dontHaveTimeoutMgr) Shutdown() { - dhtm.shutdown() - - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - // Clear any pending check for timeouts - if dhtm.checkForTimeoutsTimer != nil { - dhtm.checkForTimeoutsTimer.Stop() - } -} - -// Start the dontHaveTimeoutMgr. This method is idempotent -func (dhtm *dontHaveTimeoutMgr) Start() { - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - // Make sure the dont have timeout manager hasn't already been started - if dhtm.started { - return - } - dhtm.started = true - - // If we already have a measure of latency to the peer, use it to - // calculate a reasonable timeout - latency := dhtm.peerConn.Latency() - if latency.Nanoseconds() > 0 { - dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) - return - } - - // Otherwise measure latency by pinging the peer - go dhtm.measurePingLatency() -} - -// UpdateMessageLatency is called when we receive a response from the peer. -// It is the time between sending a request and receiving the corresponding -// response. -func (dhtm *dontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - // Update the message latency and the timeout - dhtm.messageLatency.update(elapsed) - oldTimeout := dhtm.timeout - dhtm.timeout = dhtm.calculateTimeoutFromMessageLatency() - - // If the timeout has decreased - if dhtm.timeout < oldTimeout { - // Check if after changing the timeout there are any pending wants that - // are now over the timeout - dhtm.checkForTimeouts() - } -} - -// measurePingLatency measures the latency to the peer by pinging it -func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { - // Wait up to defaultTimeout for a response to the ping - ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) - defer cancel() - - // Ping the peer - res := dhtm.peerConn.Ping(ctx) - if res.Error != nil { - // If there was an error, we'll just leave the timeout as - // defaultTimeout - return - } - - // Get the average latency to the peer - latency := dhtm.peerConn.Latency() - - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - // A message has arrived so we already set the timeout based on message latency - if dhtm.messageLatency.samples > 0 { - return - } - - // Calculate a reasonable timeout based on latency - dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) - - // Check if after changing the timeout there are any pending wants that are - // now over the timeout - dhtm.checkForTimeouts() -} - -// checkForTimeouts checks pending wants to see if any are over the timeout. -// Note: this function should only be called within the lock. 
-func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { - - if len(dhtm.wantQueue) == 0 { - return - } - - // Figure out which of the blocks that were wanted were not received - // within the timeout - expired := make([]cid.Cid, 0, len(dhtm.activeWants)) - for len(dhtm.wantQueue) > 0 { - pw := dhtm.wantQueue[0] - - // If the want is still active - if pw.active { - // The queue is in order from earliest to latest, so if we - // didn't find an expired entry we can stop iterating - if dhtm.clock.Since(pw.sent) < dhtm.timeout { - break - } - - // Add the want to the expired list - expired = append(expired, pw.c) - // Remove the want from the activeWants map - delete(dhtm.activeWants, pw.c) - } - - // Remove expired or cancelled wants from the want queue - dhtm.wantQueue = dhtm.wantQueue[1:] - } - - // Fire the timeout event for the expired wants - if len(expired) > 0 { - go dhtm.fireTimeout(expired) - } - - if len(dhtm.wantQueue) == 0 { - return - } - - // Make sure the timeout manager is still running - if dhtm.ctx.Err() != nil { - return - } - - // Schedule the next check for the moment when the oldest pending want will - // timeout - oldestStart := dhtm.wantQueue[0].sent - until := oldestStart.Add(dhtm.timeout).Sub(dhtm.clock.Now()) - if dhtm.checkForTimeoutsTimer == nil { - dhtm.checkForTimeoutsTimer = dhtm.clock.Timer(until) - go dhtm.consumeTimeouts() - } else { - dhtm.checkForTimeoutsTimer.Stop() - dhtm.checkForTimeoutsTimer.Reset(until) - } -} - -func (dhtm *dontHaveTimeoutMgr) consumeTimeouts() { - for { - select { - case <-dhtm.ctx.Done(): - return - case <-dhtm.checkForTimeoutsTimer.C: - dhtm.lk.Lock() - dhtm.checkForTimeouts() - dhtm.lk.Unlock() - } - } -} - -// AddPending adds the given keys that will expire if not cancelled before -// the timeout -func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { - if len(ks) == 0 { - return - } - - start := dhtm.clock.Now() - - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - queueWasEmpty := len(dhtm.activeWants) == 0 - - // Record the start time for each key - for _, c := range ks { - if _, ok := dhtm.activeWants[c]; !ok { - pw := pendingWant{ - c: c, - sent: start, - active: true, - } - dhtm.activeWants[c] = &pw - dhtm.wantQueue = append(dhtm.wantQueue, &pw) - } - } - - // If there was already an earlier pending item in the queue, then there - // must already be a timeout check scheduled. If there is nothing in the - // queue then we should make sure to schedule a check. 
- if queueWasEmpty { - dhtm.checkForTimeouts() - } -} - -// CancelPending is called when we receive a response for a key -func (dhtm *dontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - // Mark the wants as cancelled - for _, c := range ks { - if pw, ok := dhtm.activeWants[c]; ok { - pw.active = false - delete(dhtm.activeWants, c) - } - } -} - -// fireTimeout fires the onDontHaveTimeout method with the timed out keys -func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { - // Make sure the timeout manager has not been shut down - if dhtm.ctx.Err() != nil { - return - } - - // Fire the timeout - dhtm.onDontHaveTimeout(pending) - - // signal a timeout fired - if dhtm.timeoutsTriggered != nil { - dhtm.timeoutsTriggered <- struct{}{} - } -} - -// calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency -func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromPingLatency(latency time.Duration) time.Duration { - // The maximum expected time for a response is - // the expected time to process the want + (latency * multiplier) - // The multiplier is to provide some padding for variable latency. - timeout := dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.pingLatencyMultiplier)*latency - if timeout > dhtm.maxTimeout { - timeout = dhtm.maxTimeout - } - return timeout -} - -// calculateTimeoutFromMessageLatency calculates a timeout derived from message latency -func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromMessageLatency() time.Duration { - timeout := dhtm.messageLatency.latency * time.Duration(dhtm.messageLatencyMultiplier) - if timeout > dhtm.maxTimeout { - timeout = dhtm.maxTimeout - } - return timeout -} - -// latencyEwma is an EWMA of message latency -type latencyEwma struct { - alpha float64 - samples uint64 - latency time.Duration -} - -// update the EWMA with the given sample -func (le *latencyEwma) update(elapsed time.Duration) { - le.samples++ - - // Initially set alpha to be 1.0 / - alpha := 1.0 / float64(le.samples) - if alpha < le.alpha { - // Once we have enough samples, clamp alpha - alpha = le.alpha - } - le.latency = time.Duration(float64(elapsed)*alpha + (1-alpha)*float64(le.latency)) -} diff --git a/client/internal/messagequeue/donthavetimeoutmgr_test.go b/client/internal/messagequeue/donthavetimeoutmgr_test.go deleted file mode 100644 index 6a31242a..00000000 --- a/client/internal/messagequeue/donthavetimeoutmgr_test.go +++ /dev/null @@ -1,449 +0,0 @@ -package messagequeue - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" -) - -type mockPeerConn struct { - err error - latency time.Duration - latencies []time.Duration - clock clock.Clock - pinged chan struct{} -} - -func (pc *mockPeerConn) Ping(ctx context.Context) ping.Result { - timer := pc.clock.Timer(pc.latency) - pc.pinged <- struct{}{} - select { - case <-timer.C: - if pc.err != nil { - return ping.Result{Error: pc.err} - } - pc.latencies = append(pc.latencies, pc.latency) - case <-ctx.Done(): - } - return ping.Result{RTT: pc.latency} -} - -func (pc *mockPeerConn) Latency() time.Duration { - sum := time.Duration(0) - if len(pc.latencies) == 0 { - return sum - } - for _, l := range pc.latencies { - sum += l - } - return sum / time.Duration(len(pc.latencies)) -} - -type timeoutRecorder struct { - timedOutKs []cid.Cid - lk sync.Mutex -} - -func (tr *timeoutRecorder) 
onTimeout(tks []cid.Cid) { - tr.lk.Lock() - defer tr.lk.Unlock() - - tr.timedOutKs = append(tr.timedOutKs, tks...) -} - -func (tr *timeoutRecorder) timedOutCount() int { - tr.lk.Lock() - defer tr.lk.Unlock() - - return len(tr.timedOutKs) -} - -func (tr *timeoutRecorder) clear() { - tr.lk.Lock() - defer tr.lk.Unlock() - - tr.timedOutKs = nil -} - -func TestDontHaveTimeoutMgrTimeout(t *testing.T) { - firstks := testutil.GenerateCids(2) - secondks := append(firstks, testutil.GenerateCids(3)...) - latency := time.Millisecond * 20 - latMultiplier := 2 - expProcessTime := 5 * time.Millisecond - expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier) - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - // Add first set of keys - dhtm.AddPending(firstks) - - // Wait for less than the expected timeout - clock.Add(expectedTimeout - 10*time.Millisecond) - - // At this stage no keys should have timed out - if tr.timedOutCount() > 0 { - t.Fatal("expected timeout not to have happened yet") - } - - // Add second set of keys - dhtm.AddPending(secondks) - - // Wait until after the expected timeout - clock.Add(20 * time.Millisecond) - - <-timeoutsTriggered - - // At this stage first set of keys should have timed out - if tr.timedOutCount() != len(firstks) { - t.Fatal("expected timeout", tr.timedOutCount(), len(firstks)) - } - // Clear the recorded timed out keys - tr.clear() - - // Sleep until the second set of keys should have timed out - clock.Add(expectedTimeout + 10*time.Millisecond) - - <-timeoutsTriggered - - // At this stage all keys should have timed out. The second set included - // the first set of keys, but they were added before the first set timed - // out, so only the remaining keys should have beed added. 
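// Concretely: firstks holds 2 CIDs and secondks is those same 2 CIDs plus 3
// new ones, so this second round of timeouts is expected to report
// len(secondks)-len(firstks) = 3 keys.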
- if tr.timedOutCount() != len(secondks)-len(firstks) { - t.Fatal("expected second set of keys to timeout") - } -} - -func TestDontHaveTimeoutMgrCancel(t *testing.T) { - ks := testutil.GenerateCids(3) - latency := time.Millisecond * 10 - latMultiplier := 1 - expProcessTime := time.Duration(0) - expectedTimeout := latency - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - - // Add keys - dhtm.AddPending(ks) - clock.Add(5 * time.Millisecond) - - // Cancel keys - cancelCount := 1 - dhtm.CancelPending(ks[:cancelCount]) - - // Wait for the expected timeout - clock.Add(expectedTimeout) - - <-timeoutsTriggered - - // At this stage all non-cancelled keys should have timed out - if tr.timedOutCount() != len(ks)-cancelCount { - t.Fatal("expected timeout") - } -} - -func TestDontHaveTimeoutWantCancelWant(t *testing.T) { - ks := testutil.GenerateCids(3) - latency := time.Millisecond * 20 - latMultiplier := 1 - expProcessTime := time.Duration(0) - expectedTimeout := latency - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - - // Add keys - dhtm.AddPending(ks) - - // Wait for a short time - clock.Add(expectedTimeout - 10*time.Millisecond) - - // Cancel two keys - dhtm.CancelPending(ks[:2]) - - clock.Add(5 * time.Millisecond) - - // Add back one cancelled key - dhtm.AddPending(ks[:1]) - - // Wait till after initial timeout - clock.Add(10 * time.Millisecond) - - <-timeoutsTriggered - - // At this stage only the key that was never cancelled should have timed out - if tr.timedOutCount() != 1 { - t.Fatal("expected one key to timeout") - } - - // Wait till after added back key should time out - clock.Add(latency) - - <-timeoutsTriggered - - // At this stage the key that was added back should also have timed out - if tr.timedOutCount() != 2 { - t.Fatal("expected added back key to timeout") - } -} - -func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { - ks := testutil.GenerateCids(10) - latency := time.Millisecond * 5 - latMultiplier := 1 - expProcessTime := time.Duration(0) - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - - // Add keys repeatedly - for _, c := range ks { - dhtm.AddPending([]cid.Cid{c}) - } - - // Wait for the expected timeout - clock.Add(latency + 5*time.Millisecond) - - <-timeoutsTriggered - - // At this stage all keys should have timed out - if tr.timedOutCount() != len(ks) { - t.Fatal("expected timeout") - } -} - -func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { - ks := 
testutil.GenerateCids(2) - latency := time.Millisecond * 40 - latMultiplier := 1 - expProcessTime := time.Duration(0) - msgLatencyMultiplier := 1 - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - // Add keys - dhtm.AddPending(ks) - - // expectedTimeout - // = expProcessTime + latency*time.Duration(latMultiplier) - // = 0 + 40ms * 1 - // = 40ms - - // Wait for less than the expected timeout - clock.Add(25 * time.Millisecond) - - // Receive two message latency updates - dhtm.UpdateMessageLatency(time.Millisecond * 20) - dhtm.UpdateMessageLatency(time.Millisecond * 10) - - // alpha is 0.5 so timeout should be - // = (20ms * alpha) + (10ms * (1 - alpha)) - // = (20ms * 0.5) + (10ms * 0.5) - // = 15ms - // We've already slept for 25ms so with the new 15ms timeout - // the keys should have timed out - - // Give the queue some time to process the updates - clock.Add(5 * time.Millisecond) - - <-timeoutsTriggered - - if tr.timedOutCount() != len(ks) { - t.Fatal("expected keys to timeout") - } -} - -func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { - ks := testutil.GenerateCids(2) - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: time.Second, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - msgLatencyMultiplier := 1 - testMaxTimeout := time.Millisecond * 10 - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - // Add keys - dhtm.AddPending(ks) - - // Receive a message latency update that would make the timeout greater - // than the maximum timeout - dhtm.UpdateMessageLatency(testMaxTimeout * 4) - - // Sleep until just after the maximum timeout - clock.Add(testMaxTimeout + 5*time.Millisecond) - - <-timeoutsTriggered - - // Keys should have timed out - if tr.timedOutCount() != len(ks) { - t.Fatal("expected keys to timeout") - } -} - -func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { - ks := testutil.GenerateCids(2) - latency := time.Millisecond * 1 - latMultiplier := 2 - expProcessTime := 2 * time.Millisecond - defaultTimeout := 10 * time.Millisecond - expectedTimeout := expProcessTime + defaultTimeout - tr := timeoutRecorder{} - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged, err: fmt.Errorf("ping error")} - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - - // Add keys - dhtm.AddPending(ks) - - // Sleep for less than the expected timeout - clock.Add(expectedTimeout - 5*time.Millisecond) - - // At this stage no timeout should have happened yet - if tr.timedOutCount() > 0 { - t.Fatal("expected timeout not to have happened yet") - } - - // Sleep until after the expected timeout - clock.Add(10 * time.Millisecond) - - 
<-timeoutsTriggered - - // Now the keys should have timed out - if tr.timedOutCount() != len(ks) { - t.Fatal("expected timeout") - } -} - -func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { - ks := testutil.GenerateCids(2) - latency := time.Millisecond * 200 - latMultiplier := 1 - expProcessTime := time.Duration(0) - defaultTimeout := 100 * time.Millisecond - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - - // Add keys - dhtm.AddPending(ks) - - // Sleep for less than the default timeout - clock.Add(defaultTimeout - 50*time.Millisecond) - - // At this stage no timeout should have happened yet - if tr.timedOutCount() > 0 { - t.Fatal("expected timeout not to have happened yet") - } - - // Sleep until after the default timeout - clock.Add(defaultTimeout * 2) - - <-timeoutsTriggered - - // Now the keys should have timed out - if tr.timedOutCount() != len(ks) { - t.Fatal("expected timeout") - } -} - -func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { - ks := testutil.GenerateCids(2) - latency := time.Millisecond * 10 - latMultiplier := 1 - expProcessTime := time.Duration(0) - clock := clock.NewMock() - pinged := make(chan struct{}) - pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} - tr := timeoutRecorder{} - timeoutsTriggered := make(chan struct{}) - - dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) - dhtm.Start() - defer dhtm.Shutdown() - <-pinged - - // Add keys - dhtm.AddPending(ks) - - // Wait less than the timeout - clock.Add(latency - 5*time.Millisecond) - - // Shutdown the manager - dhtm.Shutdown() - - // Wait for the expected timeout - clock.Add(10 * time.Millisecond) - - // Manager was shut down so timeout should not have fired - if tr.timedOutCount() != 0 { - t.Fatal("expected no timeout after shutdown") - } -} diff --git a/client/internal/messagequeue/messagequeue.go b/client/internal/messagequeue/messagequeue.go deleted file mode 100644 index b80d71ee..00000000 --- a/client/internal/messagequeue/messagequeue.go +++ /dev/null @@ -1,843 +0,0 @@ -package messagequeue - -import ( - "context" - "math" - "sync" - "time" - - "github.com/benbjohnson/clock" - bswl "github.com/ipfs/go-bitswap/client/wantlist" - bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bsnet "github.com/ipfs/go-bitswap/network" - cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" - "go.uber.org/zap" -) - -var log = logging.Logger("bitswap") -var sflog = log.Desugar() - -const ( - defaultRebroadcastInterval = 30 * time.Second - // maxRetries is the number of times to attempt to send a message before - // giving up - maxRetries = 3 - sendTimeout = 30 * time.Second - // maxMessageSize is the maximum message size in bytes - maxMessageSize = 1024 * 1024 * 2 - // sendErrorBackoff is the time to wait before retrying to connect after - // an error when trying to send a message - sendErrorBackoff = 100 * time.Millisecond - // 
maxPriority is the max priority as defined by the bitswap protocol - maxPriority = math.MaxInt32 - // sendMessageDebounce is the debounce duration when calling sendMessage() - sendMessageDebounce = time.Millisecond - // when we reach sendMessageCutoff wants/cancels, we'll send the message immediately. - sendMessageCutoff = 256 - // when we debounce for more than sendMessageMaxDelay, we'll send the - // message immediately. - sendMessageMaxDelay = 20 * time.Millisecond - // The maximum amount of time in which to accept a response as being valid - // for latency calculation (as opposed to discarding it as an outlier) - maxValidLatency = 30 * time.Second -) - -// MessageNetwork is any network that can connect peers and generate a message -// sender. -type MessageNetwork interface { - ConnectTo(context.Context, peer.ID) error - NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) - Latency(peer.ID) time.Duration - Ping(context.Context, peer.ID) ping.Result - Self() peer.ID -} - -// MessageQueue implements queue of want messages to send to peers. -type MessageQueue struct { - ctx context.Context - shutdown func() - p peer.ID - network MessageNetwork - dhTimeoutMgr DontHaveTimeoutManager - - // The maximum size of a message in bytes. Any overflow is put into the - // next message - maxMessageSize int - - // The amount of time to wait when there's an error sending to a peer - // before retrying - sendErrorBackoff time.Duration - - // The maximum amount of time in which to accept a response as being valid - // for latency calculation - maxValidLatency time.Duration - - // Signals that there are outgoing wants / cancels ready to be processed - outgoingWork chan time.Time - - // Channel of CIDs of blocks / HAVEs / DONT_HAVEs received from the peer - responses chan []cid.Cid - - // Take lock whenever any of these variables are modified - wllock sync.Mutex - bcstWants recallWantlist - peerWants recallWantlist - cancels *cid.Set - priority int32 - - // Dont touch any of these variables outside of run loop - sender bsnet.MessageSender - rebroadcastIntervalLk sync.RWMutex - rebroadcastInterval time.Duration - rebroadcastTimer *clock.Timer - // For performance reasons we just clear out the fields of the message - // instead of creating a new one every time. 
- msg bsmsg.BitSwapMessage - - // For simulating time -- uses mock in test - clock clock.Clock - - // Used to track things that happen asynchronously -- used only in test - events chan messageEvent -} - -// recallWantlist keeps a list of pending wants and a list of sent wants -type recallWantlist struct { - // The list of wants that have not yet been sent - pending *bswl.Wantlist - // The list of wants that have been sent - sent *bswl.Wantlist - // The time at which each want was sent - sentAt map[cid.Cid]time.Time -} - -func newRecallWantList() recallWantlist { - return recallWantlist{ - pending: bswl.New(), - sent: bswl.New(), - sentAt: make(map[cid.Cid]time.Time), - } -} - -// Add want to the pending list -func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) { - r.pending.Add(c, priority, wtype) -} - -// Remove wants from both the pending list and the list of sent wants -func (r *recallWantlist) Remove(c cid.Cid) { - r.pending.Remove(c) - r.sent.Remove(c) - delete(r.sentAt, c) -} - -// Remove wants by type from both the pending list and the list of sent wants -func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { - r.pending.RemoveType(c, wtype) - r.sent.RemoveType(c, wtype) - if _, ok := r.sent.Contains(c); !ok { - delete(r.sentAt, c) - } -} - -// MarkSent moves the want from the pending to the sent list -// -// Returns true if the want was marked as sent. Returns false if the want wasn't -// pending. -func (r *recallWantlist) MarkSent(e bswl.Entry) bool { - if !r.pending.RemoveType(e.Cid, e.WantType) { - return false - } - r.sent.Add(e.Cid, e.Priority, e.WantType) - return true -} - -// SentAt records the time at which a want was sent -func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { - // The want may have been cancelled in the interim - if _, ok := r.sent.Contains(c); ok { - if _, ok := r.sentAt[c]; !ok { - r.sentAt[c] = at - } - } -} - -// ClearSentAt clears out the record of the time a want was sent. -// We clear the sent at time when we receive a response for a key as we -// only need the first response for latency measurement. -func (r *recallWantlist) ClearSentAt(c cid.Cid) { - delete(r.sentAt, c) -} - -type peerConn struct { - p peer.ID - network MessageNetwork -} - -func newPeerConnection(p peer.ID, network MessageNetwork) *peerConn { - return &peerConn{p, network} -} - -func (pc *peerConn) Ping(ctx context.Context) ping.Result { - return pc.network.Ping(ctx, pc.p) -} - -func (pc *peerConn) Latency() time.Duration { - return pc.network.Latency(pc.p) -} - -// Fires when a timeout occurs waiting for a response from a peer running an -// older version of Bitswap that doesn't support DONT_HAVE messages. -type OnDontHaveTimeout func(peer.ID, []cid.Cid) - -// DontHaveTimeoutManager pings a peer to estimate latency so it can set a reasonable -// upper bound on when to consider a DONT_HAVE request as timed out (when connected to -// a peer that doesn't support DONT_HAVE messages) -type DontHaveTimeoutManager interface { - // Start the manager (idempotent) - Start() - // Shutdown the manager (Shutdown is final, manager cannot be restarted) - Shutdown() - // AddPending adds the wants as pending a response. If the are not - // cancelled before the timeout, the OnDontHaveTimeout method will be called. 
- AddPending([]cid.Cid) - // CancelPending removes the wants - CancelPending([]cid.Cid) - // UpdateMessageLatency informs the manager of a new latency measurement - UpdateMessageLatency(time.Duration) -} - -// New creates a new MessageQueue. -func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { - onTimeout := func(ks []cid.Cid) { - log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) - onDontHaveTimeout(p, ks) - } - clock := clock.New() - dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout, clock) - return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr, clock, nil) -} - -type messageEvent int - -const ( - messageQueued messageEvent = iota - messageFinishedSending - latenciesRecorded -) - -// This constructor is used by the tests -func newMessageQueue( - ctx context.Context, - p peer.ID, - network MessageNetwork, - maxMsgSize int, - sendErrorBackoff time.Duration, - maxValidLatency time.Duration, - dhTimeoutMgr DontHaveTimeoutManager, - clock clock.Clock, - events chan messageEvent) *MessageQueue { - - ctx, cancel := context.WithCancel(ctx) - return &MessageQueue{ - ctx: ctx, - shutdown: cancel, - p: p, - network: network, - dhTimeoutMgr: dhTimeoutMgr, - maxMessageSize: maxMsgSize, - bcstWants: newRecallWantList(), - peerWants: newRecallWantList(), - cancels: cid.NewSet(), - outgoingWork: make(chan time.Time, 1), - responses: make(chan []cid.Cid, 8), - rebroadcastInterval: defaultRebroadcastInterval, - sendErrorBackoff: sendErrorBackoff, - maxValidLatency: maxValidLatency, - priority: maxPriority, - // For performance reasons we just clear out the fields of the message - // after using it, instead of creating a new one every time. - msg: bsmsg.New(false), - clock: clock, - events: events, - } -} - -// Add want-haves that are part of a broadcast to all connected peers -func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { - if len(wantHaves) == 0 { - return - } - - mq.wllock.Lock() - defer mq.wllock.Unlock() - - for _, c := range wantHaves { - mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) - mq.priority-- - - // We're adding a want-have for the cid, so clear any pending cancel - // for the cid - mq.cancels.Remove(c) - } - - // Schedule a message send - mq.signalWorkReady() -} - -// Add want-haves and want-blocks for the peer for this message queue. -func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { - if len(wantBlocks) == 0 && len(wantHaves) == 0 { - return - } - - mq.wllock.Lock() - defer mq.wllock.Unlock() - - for _, c := range wantHaves { - mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) - mq.priority-- - - // We're adding a want-have for the cid, so clear any pending cancel - // for the cid - mq.cancels.Remove(c) - } - for _, c := range wantBlocks { - mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) - mq.priority-- - - // We're adding a want-block for the cid, so clear any pending cancel - // for the cid - mq.cancels.Remove(c) - } - - // Schedule a message send - mq.signalWorkReady() -} - -// Add cancel messages for the given keys. 
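// A cancel entry is only queued for keys whose want was actually sent to the
// peer; wants that were still pending are simply dropped. Any outstanding
// DONT_HAVE timers for the cancelled keys are cleared as well.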
-func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { - if len(cancelKs) == 0 { - return - } - - // Cancel any outstanding DONT_HAVE timers - mq.dhTimeoutMgr.CancelPending(cancelKs) - - mq.wllock.Lock() - - workReady := false - - // Remove keys from broadcast and peer wants, and add to cancels - for _, c := range cancelKs { - // Check if a want for the key was sent - _, wasSentBcst := mq.bcstWants.sent.Contains(c) - _, wasSentPeer := mq.peerWants.sent.Contains(c) - - // Remove the want from tracking wantlists - mq.bcstWants.Remove(c) - mq.peerWants.Remove(c) - - // Only send a cancel if a want was sent - if wasSentBcst || wasSentPeer { - mq.cancels.Add(c) - workReady = true - } - } - - mq.wllock.Unlock() - - // Unlock first to be nice to the scheduler. - - // Schedule a message send - if workReady { - mq.signalWorkReady() - } -} - -// ResponseReceived is called when a message is received from the network. -// ks is the set of blocks, HAVEs and DONT_HAVEs in the message -// Note that this is just used to calculate latency. -func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { - if len(ks) == 0 { - return - } - - // These messages are just used to approximate latency, so if we get so - // many responses that they get backed up, just ignore the overflow. - select { - case mq.responses <- ks: - default: - } -} - -// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist -func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { - mq.rebroadcastIntervalLk.Lock() - mq.rebroadcastInterval = delay - if mq.rebroadcastTimer != nil { - mq.rebroadcastTimer.Reset(delay) - } - mq.rebroadcastIntervalLk.Unlock() -} - -// Startup starts the processing of messages and rebroadcasting. -func (mq *MessageQueue) Startup() { - mq.rebroadcastIntervalLk.RLock() - mq.rebroadcastTimer = mq.clock.Timer(mq.rebroadcastInterval) - mq.rebroadcastIntervalLk.RUnlock() - go mq.runQueue() -} - -// Shutdown stops the processing of messages for a message queue. -func (mq *MessageQueue) Shutdown() { - mq.shutdown() -} - -func (mq *MessageQueue) onShutdown() { - // Shut down the DONT_HAVE timeout manager - mq.dhTimeoutMgr.Shutdown() - - // Reset the streamMessageSender - if mq.sender != nil { - _ = mq.sender.Reset() - } -} - -func (mq *MessageQueue) runQueue() { - defer mq.onShutdown() - - // Create a timer for debouncing scheduled work. - scheduleWork := mq.clock.Timer(0) - if !scheduleWork.Stop() { - // Need to drain the timer if Stop() returns false - // See: https://golang.org/pkg/time/#Timer.Stop - <-scheduleWork.C - } - - var workScheduled time.Time - for mq.ctx.Err() == nil { - select { - case <-mq.rebroadcastTimer.C: - mq.rebroadcastWantlist() - - case when := <-mq.outgoingWork: - // If we have work scheduled, cancel the timer. If we - // don't, record when the work was scheduled. - // We send the time on the channel so we accurately - // track delay. - if workScheduled.IsZero() { - workScheduled = when - } else if !scheduleWork.Stop() { - // Need to drain the timer if Stop() returns false - <-scheduleWork.C - } - - // If we have too many updates and/or we've waited too - // long, send immediately. - if mq.pendingWorkCount() > sendMessageCutoff || - mq.clock.Since(workScheduled) >= sendMessageMaxDelay { - mq.sendIfReady() - workScheduled = time.Time{} - } else { - // Otherwise, extend the timer. 
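// Each new batch of work restarts the debounce timer, so sends are coalesced
// until either no new work has arrived for sendMessageDebounce, or one of the
// immediate-send conditions above (pending count over sendMessageCutoff, or
// the oldest queued work older than sendMessageMaxDelay) is hit.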
- scheduleWork.Reset(sendMessageDebounce) - if mq.events != nil { - mq.events <- messageQueued - } - } - - case <-scheduleWork.C: - // We have work scheduled and haven't seen any updates - // in sendMessageDebounce. Send immediately. - workScheduled = time.Time{} - mq.sendIfReady() - - case res := <-mq.responses: - // We received a response from the peer, calculate latency - mq.handleResponse(res) - - case <-mq.ctx.Done(): - return - } - } -} - -// Periodically resend the list of wants to the peer -func (mq *MessageQueue) rebroadcastWantlist() { - mq.rebroadcastIntervalLk.RLock() - mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) - mq.rebroadcastIntervalLk.RUnlock() - - // If some wants were transferred from the rebroadcast list - if mq.transferRebroadcastWants() { - // Send them out - mq.sendMessage() - } -} - -// Transfer wants from the rebroadcast lists into the pending lists. -func (mq *MessageQueue) transferRebroadcastWants() bool { - mq.wllock.Lock() - defer mq.wllock.Unlock() - - // Check if there are any wants to rebroadcast - if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 { - return false - } - - // Copy sent wants into pending wants lists - mq.bcstWants.pending.Absorb(mq.bcstWants.sent) - mq.peerWants.pending.Absorb(mq.peerWants.sent) - - return true -} - -func (mq *MessageQueue) signalWorkReady() { - select { - case mq.outgoingWork <- mq.clock.Now(): - default: - } -} - -func (mq *MessageQueue) sendIfReady() { - if mq.hasPendingWork() { - mq.sendMessage() - } -} - -func (mq *MessageQueue) sendMessage() { - sender, err := mq.initializeSender() - if err != nil { - // If we fail to initialize the sender, the networking layer will - // emit a Disconnect event and the MessageQueue will get cleaned up - log.Infof("Could not open message sender to peer %s: %s", mq.p, err) - mq.Shutdown() - return - } - - // Make sure the DONT_HAVE timeout manager has started - // Note: Start is idempotent - mq.dhTimeoutMgr.Start() - - // Convert want lists to a Bitswap Message - message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) - - // After processing the message, clear out its fields to save memory - defer mq.msg.Reset(false) - - if message.Empty() { - return - } - - wantlist := message.Wantlist() - mq.logOutgoingMessage(wantlist) - - if err := sender.SendMsg(mq.ctx, message); err != nil { - // If the message couldn't be sent, the networking layer will - // emit a Disconnect event and the MessageQueue will get cleaned up - log.Infof("Could not send message to peer %s: %s", mq.p, err) - mq.Shutdown() - return - } - - // Record sent time so as to calculate message latency - onSent() - - // Set a timer to wait for responses - mq.simulateDontHaveWithTimeout(wantlist) - - // If the message was too big and only a subset of wants could be - // sent, schedule sending the rest of the wants in the next - // iteration of the event loop. - if mq.hasPendingWork() { - mq.signalWorkReady() - } -} - -// If want-block times out, simulate a DONT_HAVE reponse. -// This is necessary when making requests to peers running an older version of -// Bitswap that doesn't support the DONT_HAVE response, and is also useful to -// mitigate getting blocked by a peer that takes a long time to respond. 
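// Only want-block entries that asked for a DONT_HAVE response and that are
// still recorded as sent to this peer are registered with the timeout manager.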
-func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { - // Get the CID of each want-block that expects a DONT_HAVE response - wants := make([]cid.Cid, 0, len(wantlist)) - - mq.wllock.Lock() - - for _, entry := range wantlist { - if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { - // Unlikely, but just in case check that the block hasn't been - // received in the interim - c := entry.Cid - if _, ok := mq.peerWants.sent.Contains(c); ok { - wants = append(wants, c) - } - } - } - - mq.wllock.Unlock() - - // Add wants to DONT_HAVE timeout manager - mq.dhTimeoutMgr.AddPending(wants) -} - -// handleResponse is called when a response is received from the peer, -// with the CIDs of received blocks / HAVEs / DONT_HAVEs -func (mq *MessageQueue) handleResponse(ks []cid.Cid) { - now := mq.clock.Now() - earliest := time.Time{} - - mq.wllock.Lock() - - // Check if the keys in the response correspond to any request that was - // sent to the peer. - // - // - Find the earliest request so as to calculate the longest latency as - // we want to be conservative when setting the timeout - // - Ignore latencies that are very long, as these are likely to be outliers - // caused when - // - we send a want to peer A - // - peer A does not have the block - // - peer A later receives the block from peer B - // - peer A sends us HAVE / block - for _, c := range ks { - if at, ok := mq.bcstWants.sentAt[c]; ok { - if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { - earliest = at - } - mq.bcstWants.ClearSentAt(c) - } - if at, ok := mq.peerWants.sentAt[c]; ok { - if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { - earliest = at - } - // Clear out the sent time for the CID because we only want to - // record the latency between the request and the first response - // for that CID (not subsequent responses) - mq.peerWants.ClearSentAt(c) - } - } - - mq.wllock.Unlock() - - if !earliest.IsZero() { - // Inform the timeout manager of the calculated latency - mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) - } - if mq.events != nil { - mq.events <- latenciesRecorded - } -} - -func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { - // Save some CPU cycles and allocations if log level is higher than debug - if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { - return - } - - self := mq.network.Self() - for _, e := range wantlist { - if e.Cancel { - if e.WantType == pb.Message_Wantlist_Have { - log.Debugw("sent message", - "type", "CANCEL_WANT_HAVE", - "cid", e.Cid, - "local", self, - "to", mq.p, - ) - } else { - log.Debugw("sent message", - "type", "CANCEL_WANT_BLOCK", - "cid", e.Cid, - "local", self, - "to", mq.p, - ) - } - } else { - if e.WantType == pb.Message_Wantlist_Have { - log.Debugw("sent message", - "type", "WANT_HAVE", - "cid", e.Cid, - "local", self, - "to", mq.p, - ) - } else { - log.Debugw("sent message", - "type", "WANT_BLOCK", - "cid", e.Cid, - "local", self, - "to", mq.p, - ) - } - } - } -} - -// Whether there is work to be processed -func (mq *MessageQueue) hasPendingWork() bool { - return mq.pendingWorkCount() > 0 -} - -// The amount of work that is waiting to be processed -func (mq *MessageQueue) pendingWorkCount() int { - mq.wllock.Lock() - defer mq.wllock.Unlock() - - return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() -} - -// Convert the lists of wants into a Bitswap message -func (mq *MessageQueue) 
extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { - // Get broadcast and regular wantlist entries. - mq.wllock.Lock() - peerEntries := mq.peerWants.pending.Entries() - bcstEntries := mq.bcstWants.pending.Entries() - cancels := mq.cancels.Keys() - if !supportsHave { - filteredPeerEntries := peerEntries[:0] - // If the remote peer doesn't support HAVE / DONT_HAVE messages, - // don't send want-haves (only send want-blocks) - // - // Doing this here under the lock makes everything else in this - // function simpler. - // - // TODO: We should _try_ to avoid recording these in the first - // place if possible. - for _, e := range peerEntries { - if e.WantType == pb.Message_Wantlist_Have { - mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) - } else { - filteredPeerEntries = append(filteredPeerEntries, e) - } - } - peerEntries = filteredPeerEntries - } - mq.wllock.Unlock() - - // We prioritize cancels, then regular wants, then broadcast wants. - - var ( - msgSize = 0 // size of message so far - sentCancels = 0 // number of cancels in message - sentPeerEntries = 0 // number of peer entries in message - sentBcstEntries = 0 // number of broadcast entries in message - ) - - // Add each cancel to the message - for _, c := range cancels { - msgSize += mq.msg.Cancel(c) - sentCancels++ - - if msgSize >= mq.maxMessageSize { - goto FINISH - } - } - - // Next, add the wants. If we have too many entries to fit into a single - // message, sort by priority and include the high priority ones first. - - for _, e := range peerEntries { - msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) - sentPeerEntries++ - - if msgSize >= mq.maxMessageSize { - goto FINISH - } - } - - // Add each broadcast want-have to the message - for _, e := range bcstEntries { - // Broadcast wants are sent as want-have - wantType := pb.Message_Wantlist_Have - - // If the remote peer doesn't support HAVE / DONT_HAVE messages, - // send a want-block instead - if !supportsHave { - wantType = pb.Message_Wantlist_Block - } - - msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) - sentBcstEntries++ - - if msgSize >= mq.maxMessageSize { - goto FINISH - } - } - -FINISH: - - // Finally, re-take the lock, mark sent and remove any entries from our - // message that we've decided to cancel at the last minute. - mq.wllock.Lock() - for i, e := range peerEntries[:sentPeerEntries] { - if !mq.peerWants.MarkSent(e) { - // It changed. 
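// The want was removed (e.g. cancelled) between building the message and
// re-taking the lock, so drop it from the outgoing message and blank the
// CID so that onSent skips it below.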
- mq.msg.Remove(e.Cid) - peerEntries[i].Cid = cid.Undef - } - } - - for i, e := range bcstEntries[:sentBcstEntries] { - if !mq.bcstWants.MarkSent(e) { - mq.msg.Remove(e.Cid) - bcstEntries[i].Cid = cid.Undef - } - } - - for _, c := range cancels[:sentCancels] { - if !mq.cancels.Has(c) { - mq.msg.Remove(c) - } else { - mq.cancels.Remove(c) - } - } - mq.wllock.Unlock() - - // When the message has been sent, record the time at which each want was - // sent so we can calculate message latency - onSent := func() { - now := mq.clock.Now() - - mq.wllock.Lock() - defer mq.wllock.Unlock() - - for _, e := range peerEntries[:sentPeerEntries] { - if e.Cid.Defined() { // Check if want was cancelled in the interim - mq.peerWants.SentAt(e.Cid, now) - } - } - - for _, e := range bcstEntries[:sentBcstEntries] { - if e.Cid.Defined() { // Check if want was cancelled in the interim - mq.bcstWants.SentAt(e.Cid, now) - } - } - if mq.events != nil { - mq.events <- messageFinishedSending - } - } - - return mq.msg, onSent -} - -func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { - if mq.sender == nil { - opts := &bsnet.MessageSenderOpts{ - MaxRetries: maxRetries, - SendTimeout: sendTimeout, - SendErrorBackoff: sendErrorBackoff, - } - nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p, opts) - if err != nil { - return nil, err - } - - mq.sender = nsender - } - return mq.sender, nil -} diff --git a/client/internal/messagequeue/messagequeue_test.go b/client/internal/messagequeue/messagequeue_test.go deleted file mode 100644 index 337435e5..00000000 --- a/client/internal/messagequeue/messagequeue_test.go +++ /dev/null @@ -1,836 +0,0 @@ -package messagequeue - -import ( - "context" - "fmt" - "math" - "math/rand" - "sync" - "testing" - "time" - - "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/testutil" - pb "github.com/ipfs/go-bitswap/message/pb" - cid "github.com/ipfs/go-cid" - - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" - peer "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" -) - -type fakeMessageNetwork struct { - connectError error - messageSenderError error - messageSender bsnet.MessageSender -} - -func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { - return fmn.connectError -} - -func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { - if fmn.messageSenderError == nil { - return fmn.messageSender, nil - } - return nil, fmn.messageSenderError -} - -func (fms *fakeMessageNetwork) Self() peer.ID { return "" } -func (fms *fakeMessageNetwork) Latency(peer.ID) time.Duration { return 0 } -func (fms *fakeMessageNetwork) Ping(context.Context, peer.ID) ping.Result { - return ping.Result{Error: fmt.Errorf("ping error")} -} - -type fakeDontHaveTimeoutMgr struct { - lk sync.Mutex - ks []cid.Cid - latencyUpds []time.Duration -} - -func (fp *fakeDontHaveTimeoutMgr) Start() {} -func (fp *fakeDontHaveTimeoutMgr) Shutdown() {} -func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { - fp.lk.Lock() - defer fp.lk.Unlock() - - s := cid.NewSet() - for _, c := range append(fp.ks, ks...) 
{ - s.Add(c) - } - fp.ks = s.Keys() -} -func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { - fp.lk.Lock() - defer fp.lk.Unlock() - - s := cid.NewSet() - for _, c := range fp.ks { - s.Add(c) - } - for _, c := range ks { - s.Remove(c) - } - fp.ks = s.Keys() -} -func (fp *fakeDontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { - fp.lk.Lock() - defer fp.lk.Unlock() - - fp.latencyUpds = append(fp.latencyUpds, elapsed) -} -func (fp *fakeDontHaveTimeoutMgr) latencyUpdates() []time.Duration { - fp.lk.Lock() - defer fp.lk.Unlock() - - return fp.latencyUpds -} -func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { - fp.lk.Lock() - defer fp.lk.Unlock() - - return len(fp.ks) -} - -type fakeMessageSender struct { - lk sync.Mutex - reset chan<- struct{} - messagesSent chan<- []bsmsg.Entry - supportsHave bool -} - -func newFakeMessageSender(reset chan<- struct{}, - messagesSent chan<- []bsmsg.Entry, supportsHave bool) *fakeMessageSender { - - return &fakeMessageSender{ - reset: reset, - messagesSent: messagesSent, - supportsHave: supportsHave, - } -} - -func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { - fms.lk.Lock() - defer fms.lk.Unlock() - - fms.messagesSent <- msg.Wantlist() - return nil -} -func (fms *fakeMessageSender) Close() error { return nil } -func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } -func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } - -func mockTimeoutCb(peer.ID, []cid.Cid) {} - -func collectMessages(ctx context.Context, - t *testing.T, - messagesSent <-chan []bsmsg.Entry, - timeout time.Duration) [][]bsmsg.Entry { - var messagesReceived [][]bsmsg.Entry - timeoutctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - for { - select { - case messageReceived := <-messagesSent: - messagesReceived = append(messagesReceived, messageReceived) - case <-timeoutctx.Done(): - return messagesReceived - } - } -} - -func totalEntriesLength(messages [][]bsmsg.Entry) int { - totalLength := 0 - for _, m := range messages { - totalLength += len(m) - } - return totalLength -} - -func expectEvent(t *testing.T, events <-chan messageEvent, expectedEvent messageEvent) { - evt := <-events - if evt != expectedEvent { - t.Fatal("message not queued") - } -} - -func TestStartupAndShutdown(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - bcstwh := testutil.GenerateCids(10) - - messageQueue.Startup() - messageQueue.AddBroadcastWantHaves(bcstwh) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - if len(messages) != 1 { - t.Fatal("wrong number of messages were sent for broadcast want-haves") - } - - firstMessage := messages[0] - if len(firstMessage) != len(bcstwh) { - t.Fatal("did not add all wants to want list") - } - for _, entry := range firstMessage { - if entry.Cancel { - t.Fatal("initial add sent cancel entry when it should not have") - } - } - - messageQueue.Shutdown() - - timeoutctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) - defer cancel() - select { - case <-resetChan: - case <-timeoutctx.Done(): - t.Fatal("message sender should have been reset but wasn't") - } -} - -func TestSendingMessagesDeduped(t *testing.T) { - ctx := 
context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) - - messageQueue.Startup() - messageQueue.AddWants(wantBlocks, wantHaves) - messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { - t.Fatal("Messages were not deduped") - } -} - -func TestSendingMessagesPartialDupe(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) - - messageQueue.Startup() - messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) - messageQueue.AddWants(wantBlocks[3:], wantHaves[3:]) - messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) - - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { - t.Fatal("messages were not correctly deduped") - } -} - -func TestSendingMessagesPriority(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves1 := testutil.GenerateCids(5) - wantHaves2 := testutil.GenerateCids(5) - wantHaves := append(wantHaves1, wantHaves2...) - wantBlocks1 := testutil.GenerateCids(5) - wantBlocks2 := testutil.GenerateCids(5) - wantBlocks := append(wantBlocks1, wantBlocks2...) 
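// The queue hands out priorities from a single counter that starts at
// maxPriority and is decremented for every entry added, and AddWants adds
// want-haves before want-blocks, so entries added earlier always carry a
// strictly higher priority. The assertions below rely on that ordering.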
- - messageQueue.Startup() - messageQueue.AddWants(wantBlocks1, wantHaves1) - messageQueue.AddWants(wantBlocks2, wantHaves2) - messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) - - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { - t.Fatal("wrong number of wants") - } - byCid := make(map[cid.Cid]bsmsg.Entry) - for _, entry := range messages[0] { - byCid[entry.Cid] = entry - } - - // Check that earliest want-haves have highest priority - for i := range wantHaves { - if i > 0 { - if byCid[wantHaves[i]].Priority > byCid[wantHaves[i-1]].Priority { - t.Fatal("earliest want-haves should have higher priority") - } - } - } - - // Check that earliest want-blocks have highest priority - for i := range wantBlocks { - if i > 0 { - if byCid[wantBlocks[i]].Priority > byCid[wantBlocks[i-1]].Priority { - t.Fatal("earliest want-blocks should have higher priority") - } - } - } - - // Check that want-haves have higher priority than want-blocks within - // same group - for i := range wantHaves1 { - if i > 0 { - if byCid[wantHaves[i]].Priority <= byCid[wantBlocks[0]].Priority { - t.Fatal("want-haves should have higher priority than want-blocks") - } - } - } - - // Check that all items in first group have higher priority than first item - // in second group - for i := range wantHaves1 { - if i > 0 { - if byCid[wantHaves[i]].Priority <= byCid[wantHaves2[0]].Priority { - t.Fatal("items in first group should have higher priority than items in second group") - } - } - } -} - -func TestCancelOverridesPendingWants(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - - wantHaves := testutil.GenerateCids(2) - wantBlocks := testutil.GenerateCids(2) - cancels := []cid.Cid{wantBlocks[0], wantHaves[0]} - - messageQueue.Startup() - messageQueue.AddWants(wantBlocks, wantHaves) - messageQueue.AddCancels(cancels) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)-len(cancels) { - t.Fatal("Wrong message count") - } - - // Cancelled 1 want-block and 1 want-have before they were sent - // so that leaves 1 want-block and 1 want-have - wb, wh, cl := filterWantTypes(messages[0]) - if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { - t.Fatal("Expected 1 want-block") - } - if len(wh) != 1 || !wh[0].Equals(wantHaves[1]) { - t.Fatal("Expected 1 want-have") - } - // Cancelled wants before they were sent, so no cancel should be sent - // to the network - if len(cl) != 0 { - t.Fatal("Expected no cancels") - } - - // Cancel the remaining want-blocks and want-haves - cancels = append(wantHaves, wantBlocks...) 
- messageQueue.AddCancels(cancels) - messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - // The remaining 2 cancels should be sent to the network as they are for - // wants that were sent to the network - _, _, cl = filterWantTypes(messages[0]) - if len(cl) != 2 { - t.Fatal("Expected 2 cancels") - } -} - -func TestWantOverridesPendingCancels(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - - cids := testutil.GenerateCids(3) - wantBlocks := cids[:1] - wantHaves := cids[1:] - - messageQueue.Startup() - - // Add 1 want-block and 2 want-haves - messageQueue.AddWants(wantBlocks, wantHaves) - - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - if totalEntriesLength(messages) != len(wantBlocks)+len(wantHaves) { - t.Fatal("Wrong message count", totalEntriesLength(messages)) - } - - // Cancel existing wants - messageQueue.AddCancels(cids) - // Override one cancel with a want-block (before cancel is sent to network) - messageQueue.AddWants(cids[:1], []cid.Cid{}) - - messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - if totalEntriesLength(messages) != 3 { - t.Fatal("Wrong message count", totalEntriesLength(messages)) - } - - // Should send 1 want-block and 2 cancels - wb, wh, cl := filterWantTypes(messages[0]) - if len(wb) != 1 { - t.Fatal("Expected 1 want-block") - } - if len(wh) != 0 { - t.Fatal("Expected 0 want-have") - } - if len(cl) != 2 { - t.Fatal("Expected 2 cancels") - } -} - -func TestWantlistRebroadcast(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - dhtm := &fakeDontHaveTimeoutMgr{} - clock := clock.NewMock() - events := make(chan messageEvent) - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) - bcstwh := testutil.GenerateCids(10) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) - - // Add some broadcast want-haves - messageQueue.Startup() - messageQueue.AddBroadcastWantHaves(bcstwh) - expectEvent(t, events, messageQueued) - clock.Add(sendMessageDebounce) - message := <-messagesSent - expectEvent(t, events, messageFinishedSending) - - // All broadcast want-haves should have been sent - if len(message) != len(bcstwh) { - t.Fatal("wrong number of wants") - } - - // Tell message queue to rebroadcast after 5ms, then wait 8ms - messageQueue.SetRebroadcastInterval(5 * time.Millisecond) - clock.Add(8 * time.Millisecond) - message = <-messagesSent - expectEvent(t, events, messageFinishedSending) - - // All the want-haves should have been rebroadcast - if len(message) != len(bcstwh) { - t.Fatal("did not rebroadcast all wants") - } - - // Tell message queue to rebroadcast after a long time (so it doesn't - // interfere with the next message collection), then send out some - // regular wants and collect them - messageQueue.SetRebroadcastInterval(1 * time.Second) - messageQueue.AddWants(wantBlocks, wantHaves) - expectEvent(t, events, messageQueued) - clock.Add(10 * time.Millisecond) - 
message = <-messagesSent - expectEvent(t, events, messageFinishedSending) - - // All new wants should have been sent - if len(message) != len(wantHaves)+len(wantBlocks) { - t.Fatal("wrong number of wants") - } - - select { - case <-messagesSent: - t.Fatal("should only be one message in queue") - default: - } - - // Tell message queue to rebroadcast after 10ms, then wait 15ms - messageQueue.SetRebroadcastInterval(10 * time.Millisecond) - clock.Add(15 * time.Millisecond) - message = <-messagesSent - expectEvent(t, events, messageFinishedSending) - - // Both original and new wants should have been rebroadcast - totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) - if len(message) != totalWants { - t.Fatal("did not rebroadcast all wants") - } - - // Cancel some of the wants - messageQueue.SetRebroadcastInterval(1 * time.Second) - cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) - messageQueue.AddCancels(cancels) - expectEvent(t, events, messageQueued) - clock.Add(10 * time.Millisecond) - message = <-messagesSent - expectEvent(t, events, messageFinishedSending) - - select { - case <-messagesSent: - t.Fatal("should only be one message in queue") - default: - } - - // Cancels for each want should have been sent - if len(message) != len(cancels) { - t.Fatal("wrong number of cancels") - } - for _, entry := range message { - if !entry.Cancel { - t.Fatal("expected cancels") - } - } - - // Tell message queue to rebroadcast after 10ms, then wait 15ms - messageQueue.SetRebroadcastInterval(10 * time.Millisecond) - clock.Add(15 * time.Millisecond) - message = <-messagesSent - expectEvent(t, events, messageFinishedSending) - - if len(message) != totalWants-len(cancels) { - t.Fatal("did not rebroadcast all wants") - } -} - -func TestSendingLargeMessages(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] - - wantBlocks := testutil.GenerateCids(10) - entrySize := 44 - maxMsgSize := entrySize * 3 // 3 wants - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) - - messageQueue.Startup() - messageQueue.AddWants(wantBlocks, []cid.Cid{}) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if - // we send 10 want-blocks we should expect 4 messages: - // [***] [***] [***] [*] - if len(messages) != 4 { - t.Fatal("expected 4 messages to be sent, got", len(messages)) - } - if totalEntriesLength(messages) != len(wantBlocks) { - t.Fatal("wrong number of wants") - } -} - -func TestSendToPeerThatDoesntSupportHave(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, false) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - messageQueue.Startup() - - // If the remote peer doesn't support HAVE / DONT_HAVE messages - // - want-blocks should be sent normally - // - want-haves should not be sent - // - broadcast want-haves should be sent as want-blocks - - // Check broadcast want-haves - bcwh := 
testutil.GenerateCids(10) - messageQueue.AddBroadcastWantHaves(bcwh) - messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - if len(messages) != 1 { - t.Fatal("wrong number of messages were sent", len(messages)) - } - wl := messages[0] - if len(wl) != len(bcwh) { - t.Fatal("wrong number of entries in wantlist", len(wl)) - } - for _, entry := range wl { - if entry.WantType != pb.Message_Wantlist_Block { - t.Fatal("broadcast want-haves should be sent as want-blocks") - } - } - - // Check regular want-haves and want-blocks - wbs := testutil.GenerateCids(10) - whs := testutil.GenerateCids(10) - messageQueue.AddWants(wbs, whs) - messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - if len(messages) != 1 { - t.Fatal("wrong number of messages were sent", len(messages)) - } - wl = messages[0] - if len(wl) != len(wbs) { - t.Fatal("should only send want-blocks (no want-haves)", len(wl)) - } - for _, entry := range wl { - if entry.WantType != pb.Message_Wantlist_Block { - t.Fatal("should only send want-blocks") - } - } -} - -func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, false) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - - dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) - messageQueue.Startup() - - wbs := testutil.GenerateCids(10) - messageQueue.AddWants(wbs, nil) - collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - // Check want-blocks are added to DontHaveTimeoutMgr - if dhtm.pendingCount() != len(wbs) { - t.Fatal("want-blocks not added to DontHaveTimeoutMgr") - } - - cancelCount := 2 - messageQueue.AddCancels(wbs[:cancelCount]) - collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - // Check want-blocks are removed from DontHaveTimeoutMgr - if dhtm.pendingCount() != len(wbs)-cancelCount { - t.Fatal("want-blocks not removed from DontHaveTimeoutMgr") - } -} - -func TestResponseReceived(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, false) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - - dhtm := &fakeDontHaveTimeoutMgr{} - clock := clock.NewMock() - events := make(chan messageEvent) - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) - messageQueue.Startup() - - cids := testutil.GenerateCids(10) - - // Add some wants - messageQueue.AddWants(cids[:5], nil) - expectEvent(t, events, messageQueued) - clock.Add(sendMessageDebounce) - <-messagesSent - expectEvent(t, events, messageFinishedSending) - - // simulate 10 milliseconds passing - clock.Add(10 * time.Millisecond) - - // Add some wants and wait another 10ms - messageQueue.AddWants(cids[5:8], nil) - expectEvent(t, events, messageQueued) - clock.Add(10 * time.Millisecond) - <-messagesSent - expectEvent(t, events, messageFinishedSending) - - // Receive a response for some of the wants from both groups - messageQueue.ResponseReceived([]cid.Cid{cids[0], cids[6], cids[9]}) - - // Check that message queue informs DHTM of received responses - expectEvent(t, events, 
latenciesRecorded) - upds := dhtm.latencyUpdates() - if len(upds) != 1 { - t.Fatal("expected one latency update") - } - // Elapsed time should be between when the first want was sent and the - // response received (about 20ms) - if upds[0] != 20*time.Millisecond { - t.Fatal("expected latency to be time since oldest message sent") - } -} - -func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, false) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - - dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) - messageQueue.Startup() - - cids := testutil.GenerateCids(2) - - // Add some wants and wait 10ms - messageQueue.AddWants(cids, nil) - collectMessages(ctx, t, messagesSent, 100*time.Millisecond) - - // Receive a response for the wants - messageQueue.ResponseReceived(cids) - - // Wait another 10ms - time.Sleep(10 * time.Millisecond) - - // Message queue should inform DHTM of first response - upds := dhtm.latencyUpdates() - if len(upds) != 1 { - t.Fatal("expected one latency update") - } - - // Receive a second response for the same wants - messageQueue.ResponseReceived(cids) - - // Wait for the response to be processed by the message queue - time.Sleep(10 * time.Millisecond) - - // Message queue should not inform DHTM of second response because the - // CIDs are a subset of the first response - upds = dhtm.latencyUpdates() - if len(upds) != 1 { - t.Fatal("expected one latency update") - } -} - -func TestResponseReceivedDiscardsOutliers(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, false) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] - - maxValLatency := 30 * time.Millisecond - dhtm := &fakeDontHaveTimeoutMgr{} - clock := clock.NewMock() - events := make(chan messageEvent) - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm, clock, events) - messageQueue.Startup() - - cids := testutil.GenerateCids(4) - - // Add some wants and wait 20ms - messageQueue.AddWants(cids[:2], nil) - expectEvent(t, events, messageQueued) - clock.Add(sendMessageDebounce) - <-messagesSent - expectEvent(t, events, messageFinishedSending) - - clock.Add(20 * time.Millisecond) - - // Add some more wants and wait long enough that the first wants will be - // outside the maximum valid latency, but the second wants will be inside - messageQueue.AddWants(cids[2:], nil) - expectEvent(t, events, messageQueued) - clock.Add(sendMessageDebounce) - <-messagesSent - expectEvent(t, events, messageFinishedSending) - - clock.Add(maxValLatency - 10*time.Millisecond + sendMessageDebounce) - // Receive a response for the wants - messageQueue.ResponseReceived(cids) - - // Check that the latency calculation excludes the first wants - // (because they're older than max valid latency) - expectEvent(t, events, latenciesRecorded) - upds := dhtm.latencyUpdates() - if len(upds) != 1 { - t.Fatal("expected one latency update") - } - // Elapsed time should not include outliers - if upds[0] > maxValLatency { - t.Fatal("expected latency calculation to discard 
outliers") - } -} - -func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { - var wbs []cid.Cid - var whs []cid.Cid - var cls []cid.Cid - for _, e := range wantlist { - if e.Cancel { - cls = append(cls, e.Cid) - } else if e.WantType == pb.Message_Wantlist_Block { - wbs = append(wbs, e.Cid) - } else { - whs = append(whs, e.Cid) - } - } - return wbs, whs, cls -} - -// Simplistic benchmark to allow us to simulate conditions on the gateways -func BenchmarkMessageQueue(b *testing.B) { - ctx := context.Background() - - createQueue := func() *MessageQueue { - messagesSent := make(chan []bsmsg.Entry) - resetChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(resetChan, messagesSent, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] - - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) - messageQueue.Startup() - - go func() { - for { - <-messagesSent - time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond) - } - }() - - return messageQueue - } - - // Create a handful of message queues to start with - var qs []*MessageQueue - for i := 0; i < 5; i++ { - qs = append(qs, createQueue()) - } - - for n := 0; n < b.N; n++ { - // Create a new message queue every 10 ticks - if n%10 == 0 { - qs = append(qs, createQueue()) - } - - // Pick a random message queue, favoring those created later - qn := len(qs) - i := int(math.Floor(float64(qn) * float64(1-rand.Float32()*rand.Float32()))) - if i >= qn { // because of floating point math - i = qn - 1 - } - - // Alternately add either a few wants or a lot of broadcast wants - if rand.Intn(2) == 0 { - wants := testutil.GenerateCids(10) - qs[i].AddWants(wants[:2], wants[2:]) - } else { - wants := testutil.GenerateCids(60) - qs[i].AddBroadcastWantHaves(wants) - } - } -} diff --git a/client/internal/notifications/notifications.go b/client/internal/notifications/notifications.go deleted file mode 100644 index ed4b79f5..00000000 --- a/client/internal/notifications/notifications.go +++ /dev/null @@ -1,139 +0,0 @@ -package notifications - -import ( - "context" - "sync" - - pubsub "github.com/cskr/pubsub" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" -) - -const bufferSize = 16 - -// PubSub is a simple interface for publishing blocks and being able to subscribe -// for cids. It's used internally by bitswap to decouple receiving blocks -// and actually providing them back to the GetBlocks caller. -type PubSub interface { - Publish(blocks ...blocks.Block) - Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block - Shutdown() -} - -// New generates a new PubSub interface. -func New() PubSub { - return &impl{ - wrapped: *pubsub.New(bufferSize), - closed: make(chan struct{}), - } -} - -type impl struct { - lk sync.RWMutex - wrapped pubsub.PubSub - - closed chan struct{} -} - -func (ps *impl) Publish(blocks ...blocks.Block) { - ps.lk.RLock() - defer ps.lk.RUnlock() - select { - case <-ps.closed: - return - default: - } - - for _, block := range blocks { - ps.wrapped.Pub(block, block.Cid().KeyString()) - } -} - -func (ps *impl) Shutdown() { - ps.lk.Lock() - defer ps.lk.Unlock() - select { - case <-ps.closed: - return - default: - } - close(ps.closed) - ps.wrapped.Shutdown() -} - -// Subscribe returns a channel of blocks for the given |keys|. 
|blockChannel| -// is closed if the |ctx| times out or is cancelled, or after receiving the blocks -// corresponding to |keys|. -func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { - - blocksCh := make(chan blocks.Block, len(keys)) - valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking - if len(keys) == 0 { - close(blocksCh) - return blocksCh - } - - // prevent shutdown - ps.lk.RLock() - defer ps.lk.RUnlock() - - select { - case <-ps.closed: - close(blocksCh) - return blocksCh - default: - } - - // AddSubOnceEach listens for each key in the list, and closes the channel - // once all keys have been received - ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) - go func() { - defer func() { - close(blocksCh) - - ps.lk.RLock() - defer ps.lk.RUnlock() - // Don't touch the pubsub instance if we're - // already closed. - select { - case <-ps.closed: - return - default: - } - - ps.wrapped.Unsub(valuesCh) - }() - - for { - select { - case <-ctx.Done(): - return - case <-ps.closed: - case val, ok := <-valuesCh: - if !ok { - return - } - block, ok := val.(blocks.Block) - if !ok { - return - } - select { - case <-ctx.Done(): - return - case blocksCh <- block: // continue - case <-ps.closed: - } - } - } - }() - - return blocksCh -} - -func toStrings(keys []cid.Cid) []string { - strs := make([]string, 0, len(keys)) - for _, key := range keys { - strs = append(strs, key.KeyString()) - } - return strs -} diff --git a/client/internal/notifications/notifications_test.go b/client/internal/notifications/notifications_test.go deleted file mode 100644 index 4e59ae9b..00000000 --- a/client/internal/notifications/notifications_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package notifications - -import ( - "bytes" - "context" - "testing" - "time" - - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" -) - -func TestDuplicates(t *testing.T) { - b1 := blocks.NewBlock([]byte("1")) - b2 := blocks.NewBlock([]byte("2")) - - n := New() - defer n.Shutdown() - ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid()) - - n.Publish(b1) - blockRecvd, ok := <-ch - if !ok { - t.Fail() - } - assertBlocksEqual(t, b1, blockRecvd) - - n.Publish(b1) // ignored duplicate - - n.Publish(b2) - blockRecvd, ok = <-ch - if !ok { - t.Fail() - } - assertBlocksEqual(t, b2, blockRecvd) -} - -func TestPublishSubscribe(t *testing.T) { - blockSent := blocks.NewBlock([]byte("Greetings from The Interval")) - - n := New() - defer n.Shutdown() - ch := n.Subscribe(context.Background(), blockSent.Cid()) - - n.Publish(blockSent) - blockRecvd, ok := <-ch - if !ok { - t.Fail() - } - - assertBlocksEqual(t, blockRecvd, blockSent) - -} - -func TestSubscribeMany(t *testing.T) { - e1 := blocks.NewBlock([]byte("1")) - e2 := blocks.NewBlock([]byte("2")) - - n := New() - defer n.Shutdown() - ch := n.Subscribe(context.Background(), e1.Cid(), e2.Cid()) - - n.Publish(e1) - r1, ok := <-ch - if !ok { - t.Fatal("didn't receive first expected block") - } - assertBlocksEqual(t, e1, r1) - - n.Publish(e2) - r2, ok := <-ch - if !ok { - t.Fatal("didn't receive second expected block") - } - assertBlocksEqual(t, e2, r2) -} - -// TestDuplicateSubscribe tests a scenario where a given block -// would be requested twice at the same time. 
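The Publish/Subscribe contract above is easiest to see end to end: subscribe for a set of CIDs first, publish blocks as they arrive, and drain the returned channel until it closes once every subscribed key has been seen. A minimal sketch (editor's illustration, not from the deleted file), assuming it lives inside this internal notifications package:

package notifications

import (
	"context"
	"fmt"

	blocks "github.com/ipfs/go-block-format"
)

// sketchPubSubUsage shows the expected call order: Subscribe before Publish,
// then read the channel, which is closed after all subscribed keys arrive.
func sketchPubSubUsage() {
	n := New()
	defer n.Shutdown()

	b1 := blocks.NewBlock([]byte("first"))
	b2 := blocks.NewBlock([]byte("second"))

	// Subscribe before publishing so no notification is missed.
	ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid())

	// Simulate the two blocks arriving from the network.
	n.Publish(b1)
	n.Publish(b2)

	// The loop terminates because the channel closes once both keys are received.
	for blk := range ch {
		fmt.Println(blk.Cid())
	}
}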
-func TestDuplicateSubscribe(t *testing.T) { - e1 := blocks.NewBlock([]byte("1")) - - n := New() - defer n.Shutdown() - ch1 := n.Subscribe(context.Background(), e1.Cid()) - ch2 := n.Subscribe(context.Background(), e1.Cid()) - - n.Publish(e1) - r1, ok := <-ch1 - if !ok { - t.Fatal("didn't receive first expected block") - } - assertBlocksEqual(t, e1, r1) - - r2, ok := <-ch2 - if !ok { - t.Fatal("didn't receive second expected block") - } - assertBlocksEqual(t, e1, r2) -} - -func TestShutdownBeforeUnsubscribe(t *testing.T) { - e1 := blocks.NewBlock([]byte("1")) - - n := New() - ctx, cancel := context.WithCancel(context.Background()) - ch := n.Subscribe(ctx, e1.Cid()) // no keys provided - n.Shutdown() - cancel() - - select { - case _, ok := <-ch: - if ok { - t.Fatal("channel should have been closed") - } - case <-time.After(5 * time.Second): - t.Fatal("channel should have been closed") - } -} - -func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { - n := New() - defer n.Shutdown() - ch := n.Subscribe(context.Background()) // no keys provided - if _, ok := <-ch; ok { - t.Fatal("should be closed if no keys provided") - } -} - -func TestCarryOnWhenDeadlineExpires(t *testing.T) { - - impossibleDeadline := time.Nanosecond - fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline) - defer cancel() - - n := New() - defer n.Shutdown() - block := blocks.NewBlock([]byte("A Missed Connection")) - blockChannel := n.Subscribe(fastExpiringCtx, block.Cid()) - - assertBlockChannelNil(t, blockChannel) -} - -func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { - - g := blocksutil.NewBlockGenerator() - ctx, cancel := context.WithCancel(context.Background()) - n := New() - defer n.Shutdown() - - t.Log("generate a large number of blocks. exceed default buffer") - bs := g.Blocks(1000) - ks := func() []cid.Cid { - var keys []cid.Cid - for _, b := range bs { - keys = append(keys, b.Cid()) - } - return keys - }() - - _ = n.Subscribe(ctx, ks...) // ignore received channel - - t.Log("cancel context before any blocks published") - cancel() - for _, b := range bs { - n.Publish(b) - } - - t.Log("publishing the large number of blocks to the ignored channel must not deadlock") -} - -func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { - _, ok := <-blockChannel - if ok { - t.Fail() - } -} - -func assertBlocksEqual(t *testing.T, a, b blocks.Block) { - if !bytes.Equal(a.RawData(), b.RawData()) { - t.Fatal("blocks aren't equal") - } - if a.Cid() != b.Cid() { - t.Fatal("block keys aren't equal") - } -} diff --git a/client/internal/peermanager/peermanager.go b/client/internal/peermanager/peermanager.go deleted file mode 100644 index dbce5bdd..00000000 --- a/client/internal/peermanager/peermanager.go +++ /dev/null @@ -1,246 +0,0 @@ -package peermanager - -import ( - "context" - "sync" - - logging "github.com/ipfs/go-log" - "github.com/ipfs/go-metrics-interface" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var log = logging.Logger("bs:peermgr") - -// PeerQueue provides a queue of messages to be sent for a single peer. -type PeerQueue interface { - AddBroadcastWantHaves([]cid.Cid) - AddWants([]cid.Cid, []cid.Cid) - AddCancels([]cid.Cid) - ResponseReceived(ks []cid.Cid) - Startup() - Shutdown() -} - -type Session interface { - ID() uint64 - SignalAvailability(peer.ID, bool) -} - -// PeerQueueFactory provides a function that will create a PeerQueue. 
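To make the PeerQueue/PeerQueueFactory relationship concrete, here is a minimal sketch (editor's illustration, not from the deleted file) of a no-op queue and a factory with the PeerQueueFactory shape, much like the fakes the tests below construct; in production bitswap the factory creates a real message queue instead, and noopPeerQueue is an invented name.

package peermanager

import (
	"context"

	cid "github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// noopPeerQueue satisfies PeerQueue but records and sends nothing.
type noopPeerQueue struct{}

func (q *noopPeerQueue) Startup()                              {}
func (q *noopPeerQueue) Shutdown()                             {}
func (q *noopPeerQueue) AddBroadcastWantHaves(whs []cid.Cid)   {}
func (q *noopPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) {}
func (q *noopPeerQueue) AddCancels(cs []cid.Cid)               {}
func (q *noopPeerQueue) ResponseReceived(ks []cid.Cid)         {}

// noopQueueFactory has the PeerQueueFactory shape: one queue per peer.
func noopQueueFactory(ctx context.Context, p peer.ID) PeerQueue {
	return &noopPeerQueue{}
}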
-type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue - -// PeerManager manages a pool of peers and sends messages to peers in the pool. -type PeerManager struct { - // sync access to peerQueues and peerWantManager - pqLk sync.RWMutex - // peerQueues -- interact through internal utility functions get/set/remove/iterate - peerQueues map[peer.ID]PeerQueue - pwm *peerWantManager - - createPeerQueue PeerQueueFactory - ctx context.Context - - psLk sync.RWMutex - sessions map[uint64]Session - peerSessions map[peer.ID]map[uint64]struct{} - - self peer.ID -} - -// New creates a new PeerManager, given a context and a peerQueueFactory. -func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { - wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() - wantBlockGauge := metrics.NewCtx(ctx, "want_blocks_total", "Number of want-blocks in wantlist.").Gauge() - return &PeerManager{ - peerQueues: make(map[peer.ID]PeerQueue), - pwm: newPeerWantManager(wantGauge, wantBlockGauge), - createPeerQueue: createPeerQueue, - ctx: ctx, - self: self, - - sessions: make(map[uint64]Session), - peerSessions: make(map[peer.ID]map[uint64]struct{}), - } -} - -func (pm *PeerManager) AvailablePeers() []peer.ID { - // TODO: Rate-limit peers - return pm.ConnectedPeers() -} - -// ConnectedPeers returns a list of peers this PeerManager is managing. -func (pm *PeerManager) ConnectedPeers() []peer.ID { - pm.pqLk.RLock() - defer pm.pqLk.RUnlock() - - peers := make([]peer.ID, 0, len(pm.peerQueues)) - for p := range pm.peerQueues { - peers = append(peers, p) - } - return peers -} - -// Connected is called to add a new peer to the pool, and send it an initial set -// of wants. -func (pm *PeerManager) Connected(p peer.ID) { - pm.pqLk.Lock() - defer pm.pqLk.Unlock() - - pq := pm.getOrCreate(p) - - // Inform the peer want manager that there's a new peer - pm.pwm.addPeer(pq, p) - - // Inform the sessions that the peer has connected - pm.signalAvailability(p, true) -} - -// Disconnected is called to remove a peer from the pool. -func (pm *PeerManager) Disconnected(p peer.ID) { - pm.pqLk.Lock() - defer pm.pqLk.Unlock() - - pq, ok := pm.peerQueues[p] - - if !ok { - return - } - - // Inform the sessions that the peer has disconnected - pm.signalAvailability(p, false) - - // Clean up the peer - delete(pm.peerQueues, p) - pq.Shutdown() - pm.pwm.removePeer(p) -} - -// ResponseReceived is called when a message is received from the network. -// ks is the set of blocks, HAVEs and DONT_HAVEs in the message -// Note that this is just used to calculate latency. -func (pm *PeerManager) ResponseReceived(p peer.ID, ks []cid.Cid) { - pm.pqLk.Lock() - pq, ok := pm.peerQueues[p] - pm.pqLk.Unlock() - - if ok { - pq.ResponseReceived(ks) - } -} - -// BroadcastWantHaves broadcasts want-haves to all peers (used by the session -// to discover seeds). -// For each peer it filters out want-haves that have previously been sent to -// the peer. -func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { - pm.pqLk.Lock() - defer pm.pqLk.Unlock() - - pm.pwm.broadcastWantHaves(wantHaves) -} - -// SendWants sends the given want-blocks and want-haves to the given peer. -// It filters out wants that have previously been sent to the peer. 
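Putting the calls above together, a typical PeerManager lifecycle is: construct it with a factory, add peers as they connect, broadcast want-haves for discovery, then target specific peers with want-blocks and cancel once the data arrives. A minimal sketch (editor's illustration, not from the deleted file), assuming it lives in this package; the factory and CID arguments are placeholders supplied by the caller.

package peermanager

import (
	"context"

	cid "github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// sketchLifecycle walks the main PeerManager entry points in the order a
// client uses them: Connected, BroadcastWantHaves, SendWants, SendCancels,
// Disconnected.
func sketchLifecycle(ctx context.Context, factory PeerQueueFactory, self, remote peer.ID, c cid.Cid) {
	pm := New(ctx, factory, self)

	// A new connection triggers an initial send of any broadcast wants.
	pm.Connected(remote)

	// Session discovery: ask every connected peer whether it has the block.
	pm.BroadcastWantHaves(ctx, []cid.Cid{c})

	// Once a peer looks promising, request the block itself from it.
	pm.SendWants(ctx, remote, []cid.Cid{c}, nil)

	// When the block arrives (from anyone), cancel the outstanding wants.
	pm.SendCancels(ctx, []cid.Cid{c})

	pm.Disconnected(remote)
}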
-func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - pm.pqLk.Lock() - defer pm.pqLk.Unlock() - - if _, ok := pm.peerQueues[p]; ok { - pm.pwm.sendWants(p, wantBlocks, wantHaves) - } -} - -// SendCancels sends cancels for the given keys to all peers who had previously -// received a want for those keys. -func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { - pm.pqLk.Lock() - defer pm.pqLk.Unlock() - - // Send a CANCEL to each peer that has been sent a want-block or want-have - pm.pwm.sendCancels(cancelKs) -} - -// CurrentWants returns the list of pending wants (both want-haves and want-blocks). -func (pm *PeerManager) CurrentWants() []cid.Cid { - pm.pqLk.RLock() - defer pm.pqLk.RUnlock() - - return pm.pwm.getWants() -} - -// CurrentWantBlocks returns the list of pending want-blocks -func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { - pm.pqLk.RLock() - defer pm.pqLk.RUnlock() - - return pm.pwm.getWantBlocks() -} - -// CurrentWantHaves returns the list of pending want-haves -func (pm *PeerManager) CurrentWantHaves() []cid.Cid { - pm.pqLk.RLock() - defer pm.pqLk.RUnlock() - - return pm.pwm.getWantHaves() -} - -func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { - pq, ok := pm.peerQueues[p] - if !ok { - pq = pm.createPeerQueue(pm.ctx, p) - pq.Startup() - pm.peerQueues[p] = pq - } - return pq -} - -// RegisterSession tells the PeerManager that the given session is interested -// in events about the given peer. -func (pm *PeerManager) RegisterSession(p peer.ID, s Session) { - pm.psLk.Lock() - defer pm.psLk.Unlock() - - if _, ok := pm.sessions[s.ID()]; !ok { - pm.sessions[s.ID()] = s - } - - if _, ok := pm.peerSessions[p]; !ok { - pm.peerSessions[p] = make(map[uint64]struct{}) - } - pm.peerSessions[p][s.ID()] = struct{}{} -} - -// UnregisterSession tells the PeerManager that the given session is no longer -// interested in PeerManager events. -func (pm *PeerManager) UnregisterSession(ses uint64) { - pm.psLk.Lock() - defer pm.psLk.Unlock() - - for p := range pm.peerSessions { - delete(pm.peerSessions[p], ses) - if len(pm.peerSessions[p]) == 0 { - delete(pm.peerSessions, p) - } - } - - delete(pm.sessions, ses) -} - -// signalAvailability is called when a peer's connectivity changes. -// It informs interested sessions. 
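The session-facing half of the PeerManager is the RegisterSession/UnregisterSession pair above together with the Session interface: a session hands the PeerManager an object it can call back whenever a peer's connectivity changes. A minimal sketch (editor's illustration, not from the deleted file), assuming it lives in this package; loggingSession is a made-up stand-in, not the real bitswap session.

package peermanager

import (
	"fmt"

	peer "github.com/libp2p/go-libp2p/core/peer"
)

// loggingSession is a toy Session implementation that just prints the
// availability changes signalled by the PeerManager.
type loggingSession struct {
	id uint64
}

func (s *loggingSession) ID() uint64 { return s.id }

func (s *loggingSession) SignalAvailability(p peer.ID, isConnected bool) {
	fmt.Printf("session %d: peer %s connected=%v\n", s.id, p, isConnected)
}

// registerSketch shows the registration flow: the session is notified about
// the given peer until UnregisterSession is called.
func registerSketch(pm *PeerManager, p peer.ID) {
	s := &loggingSession{id: 1}
	pm.RegisterSession(p, s)
	// ... the PeerManager now calls s.SignalAvailability(p, true/false)
	// from Connected/Disconnected ...
	pm.UnregisterSession(s.ID())
}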
-func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { - pm.psLk.Lock() - defer pm.psLk.Unlock() - - sesIds, ok := pm.peerSessions[p] - if !ok { - return - } - for sesId := range sesIds { - if s, ok := pm.sessions[sesId]; ok { - s.SignalAvailability(p, isConnected) - } - } -} diff --git a/client/internal/peermanager/peermanager_test.go b/client/internal/peermanager/peermanager_test.go deleted file mode 100644 index 231f8931..00000000 --- a/client/internal/peermanager/peermanager_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package peermanager - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" - - "github.com/libp2p/go-libp2p/core/peer" -) - -type msg struct { - p peer.ID - wantBlocks []cid.Cid - wantHaves []cid.Cid - cancels []cid.Cid -} - -type mockPeerQueue struct { - p peer.ID - msgs chan msg -} - -func (fp *mockPeerQueue) Startup() {} -func (fp *mockPeerQueue) Shutdown() {} - -func (fp *mockPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) { - fp.msgs <- msg{fp.p, nil, whs, nil} -} -func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { - fp.msgs <- msg{fp.p, wbs, whs, nil} -} -func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { - fp.msgs <- msg{fp.p, nil, nil, cs} -} -func (fp *mockPeerQueue) ResponseReceived(ks []cid.Cid) { -} - -type peerWants struct { - wantHaves []cid.Cid - wantBlocks []cid.Cid - cancels []cid.Cid -} - -func collectMessages(ch chan msg, timeout time.Duration) map[peer.ID]peerWants { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - collected := make(map[peer.ID]peerWants) - for { - select { - case m := <-ch: - pw, ok := collected[m.p] - if !ok { - pw = peerWants{} - } - pw.wantHaves = append(pw.wantHaves, m.wantHaves...) - pw.wantBlocks = append(pw.wantBlocks, m.wantBlocks...) - pw.cancels = append(pw.cancels, m.cancels...) 
- collected[m.p] = pw - case <-ctx.Done(): - return collected - } - } -} - -func makePeerQueueFactory(msgs chan msg) PeerQueueFactory { - return func(ctx context.Context, p peer.ID) PeerQueue { - return &mockPeerQueue{ - p: p, - msgs: msgs, - } - } -} - -func TestAddingAndRemovingPeers(t *testing.T) { - ctx := context.Background() - msgs := make(chan msg, 16) - peerQueueFactory := makePeerQueueFactory(msgs) - - tp := testutil.GeneratePeers(6) - self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] - peerManager := New(ctx, peerQueueFactory, self) - - peerManager.Connected(peer1) - peerManager.Connected(peer2) - peerManager.Connected(peer3) - - connectedPeers := peerManager.ConnectedPeers() - - if !testutil.ContainsPeer(connectedPeers, peer1) || - !testutil.ContainsPeer(connectedPeers, peer2) || - !testutil.ContainsPeer(connectedPeers, peer3) { - t.Fatal("Peers not connected that should be connected") - } - - if testutil.ContainsPeer(connectedPeers, peer4) || - testutil.ContainsPeer(connectedPeers, peer5) { - t.Fatal("Peers connected that shouldn't be connected") - } - - // disconnect a peer - peerManager.Disconnected(peer1) - connectedPeers = peerManager.ConnectedPeers() - - if testutil.ContainsPeer(connectedPeers, peer1) { - t.Fatal("Peer should have been disconnected but was not") - } - - // reconnect peer - peerManager.Connected(peer1) - connectedPeers = peerManager.ConnectedPeers() - - if !testutil.ContainsPeer(connectedPeers, peer1) { - t.Fatal("Peer should have been connected but was not") - } -} - -func TestBroadcastOnConnect(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - msgs := make(chan msg, 16) - peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) - self, peer1 := tp[0], tp[1] - peerManager := New(ctx, peerQueueFactory, self) - - cids := testutil.GenerateCids(2) - peerManager.BroadcastWantHaves(ctx, cids) - - // Connect with two broadcast wants for first peer - peerManager.Connected(peer1) - collected := collectMessages(msgs, 2*time.Millisecond) - - if len(collected[peer1].wantHaves) != 2 { - t.Fatal("Expected want-haves to be sent to newly connected peer") - } -} - -func TestBroadcastWantHaves(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - msgs := make(chan msg, 16) - peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) - self, peer1, peer2 := tp[0], tp[1], tp[2] - peerManager := New(ctx, peerQueueFactory, self) - - cids := testutil.GenerateCids(3) - - // Broadcast the first two. - peerManager.BroadcastWantHaves(ctx, cids[:2]) - - // First peer should get them. 
- peerManager.Connected(peer1) - collected := collectMessages(msgs, 2*time.Millisecond) - - if len(collected[peer1].wantHaves) != 2 { - t.Fatal("Expected want-haves to be sent to newly connected peer") - } - - // Connect to second peer - peerManager.Connected(peer2) - - // Send a broadcast to all peers, including cid that was already sent to - // first peer - peerManager.BroadcastWantHaves(ctx, []cid.Cid{cids[0], cids[2]}) - collected = collectMessages(msgs, 2*time.Millisecond) - - // One of the want-haves was already sent to peer1 - if len(collected[peer1].wantHaves) != 1 { - t.Fatalf("Expected 1 want-haves to be sent to first peer, got %d", - len(collected[peer1].wantHaves)) - } - if len(collected[peer2].wantHaves) != 3 { - t.Fatalf("Expected 3 want-haves to be sent to second peer, got %d", - len(collected[peer2].wantHaves)) - } -} - -func TestSendWants(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - msgs := make(chan msg, 16) - peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) - self, peer1 := tp[0], tp[1] - peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(4) - - peerManager.Connected(peer1) - peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) - collected := collectMessages(msgs, 2*time.Millisecond) - - if len(collected[peer1].wantHaves) != 1 { - t.Fatal("Expected want-have to be sent to peer") - } - if len(collected[peer1].wantBlocks) != 1 { - t.Fatal("Expected want-block to be sent to peer") - } - - peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2], cids[3]}) - collected = collectMessages(msgs, 2*time.Millisecond) - - // First want-have and want-block should be filtered (because they were - // already sent) - if len(collected[peer1].wantHaves) != 1 { - t.Fatal("Expected want-have to be sent to peer") - } - if len(collected[peer1].wantBlocks) != 1 { - t.Fatal("Expected want-block to be sent to peer") - } -} - -func TestSendCancels(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - msgs := make(chan msg, 16) - peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) - self, peer1, peer2 := tp[0], tp[1], tp[2] - peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(4) - - // Connect to peer1 and peer2 - peerManager.Connected(peer1) - peerManager.Connected(peer2) - - // Send 2 want-blocks and 1 want-have to peer1 - peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2]}) - - // Clear messages - collectMessages(msgs, 2*time.Millisecond) - - // Send cancels for 1 want-block and 1 want-have - peerManager.SendCancels(ctx, []cid.Cid{cids[0], cids[2]}) - collected := collectMessages(msgs, 2*time.Millisecond) - - if _, ok := collected[peer2]; ok { - t.Fatal("Expected no cancels to be sent to peer that was not sent messages") - } - if len(collected[peer1].cancels) != 2 { - t.Fatal("Expected cancel to be sent for want-block and want-have sent to peer") - } - - // Send cancels for all cids - peerManager.SendCancels(ctx, cids) - collected = collectMessages(msgs, 2*time.Millisecond) - - if _, ok := collected[peer2]; ok { - t.Fatal("Expected no cancels to be sent to peer that was not sent messages") - } - if len(collected[peer1].cancels) != 1 { - t.Fatal("Expected cancel to be sent for remaining want-block") - } -} - -func (s *sess) ID() uint64 { - return s.id -} -func (s *sess) SignalAvailability(p 
peer.ID, isAvailable bool) { - s.available[p] = isAvailable -} - -type sess struct { - id uint64 - available map[peer.ID]bool -} - -func newSess(id uint64) *sess { - return &sess{id, make(map[peer.ID]bool)} -} - -func TestSessionRegistration(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - msgs := make(chan msg, 16) - peerQueueFactory := makePeerQueueFactory(msgs) - - tp := testutil.GeneratePeers(3) - self, p1, p2 := tp[0], tp[1], tp[2] - peerManager := New(ctx, peerQueueFactory, self) - - id := uint64(1) - s := newSess(id) - peerManager.RegisterSession(p1, s) - if s.available[p1] { - t.Fatal("Expected peer not be available till connected") - } - peerManager.RegisterSession(p2, s) - if s.available[p2] { - t.Fatal("Expected peer not be available till connected") - } - - peerManager.Connected(p1) - if !s.available[p1] { - t.Fatal("Expected signal callback") - } - peerManager.Connected(p2) - if !s.available[p2] { - t.Fatal("Expected signal callback") - } - - peerManager.Disconnected(p1) - if s.available[p1] { - t.Fatal("Expected signal callback") - } - if !s.available[p2] { - t.Fatal("Expected signal callback only for disconnected peer") - } - - peerManager.UnregisterSession(id) - - peerManager.Connected(p1) - if s.available[p1] { - t.Fatal("Expected no signal callback (session unregistered)") - } -} - -type benchPeerQueue struct { -} - -func (*benchPeerQueue) Startup() {} -func (*benchPeerQueue) Shutdown() {} - -func (*benchPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) {} -func (*benchPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) {} -func (*benchPeerQueue) AddCancels(cs []cid.Cid) {} -func (*benchPeerQueue) ResponseReceived(ks []cid.Cid) {} - -// Simplistic benchmark to allow us to stress test -func BenchmarkPeerManager(b *testing.B) { - b.StopTimer() - - ctx := context.Background() - - peerQueueFactory := func(ctx context.Context, p peer.ID) PeerQueue { - return &benchPeerQueue{} - } - - self := testutil.GeneratePeers(1)[0] - peers := testutil.GeneratePeers(500) - peerManager := New(ctx, peerQueueFactory, self) - - // Create a bunch of connections - connected := 0 - for i := 0; i < len(peers); i++ { - peerManager.Connected(peers[i]) - connected++ - } - - var wanted []cid.Cid - - b.StartTimer() - for n := 0; n < b.N; n++ { - // Pick a random peer - i := rand.Intn(connected) - - // Alternately add either a few wants or many broadcast wants - r := rand.Intn(8) - if r == 0 { - wants := testutil.GenerateCids(10) - peerManager.SendWants(ctx, peers[i], wants[:2], wants[2:]) - wanted = append(wanted, wants...) - } else if r == 1 { - wants := testutil.GenerateCids(30) - peerManager.BroadcastWantHaves(ctx, wants) - wanted = append(wanted, wants...) - } else { - limit := len(wanted) / 10 - cancel := wanted[:limit] - wanted = wanted[limit:] - peerManager.SendCancels(ctx, cancel) - } - } -} diff --git a/client/internal/peermanager/peerwantmanager.go b/client/internal/peermanager/peerwantmanager.go deleted file mode 100644 index 0bc4732c..00000000 --- a/client/internal/peermanager/peerwantmanager.go +++ /dev/null @@ -1,464 +0,0 @@ -package peermanager - -import ( - "bytes" - "fmt" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// Gauge can be used to keep track of a metric that increases and decreases -// incrementally. 
It is used by the peerWantManager to track the number of -// want-blocks that are active (ie sent but no response received) -type Gauge interface { - Inc() - Dec() -} - -// peerWantManager keeps track of which want-haves and want-blocks have been -// sent to each peer, so that the PeerManager doesn't send duplicates. -type peerWantManager struct { - // peerWants maps peers to outstanding wants. - // A peer's wants is the _union_ of the broadcast wants and the wants in - // this list. - peerWants map[peer.ID]*peerWant - - // Reverse index of all wants in peerWants. - wantPeers map[cid.Cid]map[peer.ID]struct{} - - // broadcastWants tracks all the current broadcast wants. - broadcastWants *cid.Set - - // Keeps track of the number of active want-haves & want-blocks - wantGauge Gauge - // Keeps track of the number of active want-blocks - wantBlockGauge Gauge -} - -type peerWant struct { - wantBlocks *cid.Set - wantHaves *cid.Set - peerQueue PeerQueue -} - -// New creates a new peerWantManager with a Gauge that keeps track of the -// number of active want-blocks (ie sent but no response received) -func newPeerWantManager(wantGauge Gauge, wantBlockGauge Gauge) *peerWantManager { - return &peerWantManager{ - broadcastWants: cid.NewSet(), - peerWants: make(map[peer.ID]*peerWant), - wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), - wantGauge: wantGauge, - wantBlockGauge: wantBlockGauge, - } -} - -// addPeer adds a peer whose wants we need to keep track of. It sends the -// current list of broadcast wants to the peer. -func (pwm *peerWantManager) addPeer(peerQueue PeerQueue, p peer.ID) { - if _, ok := pwm.peerWants[p]; ok { - return - } - - pwm.peerWants[p] = &peerWant{ - wantBlocks: cid.NewSet(), - wantHaves: cid.NewSet(), - peerQueue: peerQueue, - } - - // Broadcast any live want-haves to the newly connected peer - if pwm.broadcastWants.Len() > 0 { - wants := pwm.broadcastWants.Keys() - peerQueue.AddBroadcastWantHaves(wants) - } -} - -// RemovePeer removes a peer and its associated wants from tracking -func (pwm *peerWantManager) removePeer(p peer.ID) { - pws, ok := pwm.peerWants[p] - if !ok { - return - } - - // Clean up want-blocks - _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { - // Clean up want-blocks from the reverse index - pwm.reverseIndexRemove(c, p) - - // Decrement the gauges by the number of pending want-blocks to the peer - peerCounts := pwm.wantPeerCounts(c) - if peerCounts.wantBlock == 0 { - pwm.wantBlockGauge.Dec() - } - if !peerCounts.wanted() { - pwm.wantGauge.Dec() - } - - return nil - }) - - // Clean up want-haves - _ = pws.wantHaves.ForEach(func(c cid.Cid) error { - // Clean up want-haves from the reverse index - pwm.reverseIndexRemove(c, p) - - // Decrement the gauge by the number of pending want-haves to the peer - peerCounts := pwm.wantPeerCounts(c) - if !peerCounts.wanted() { - pwm.wantGauge.Dec() - } - return nil - }) - - delete(pwm.peerWants, p) -} - -// broadcastWantHaves sends want-haves to any peers that have not yet been sent them. -func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { - unsent := make([]cid.Cid, 0, len(wantHaves)) - for _, c := range wantHaves { - if pwm.broadcastWants.Has(c) { - // Already a broadcast want, skip it. 
- continue - } - pwm.broadcastWants.Add(c) - unsent = append(unsent, c) - - // If no peer has a pending want for the key - if _, ok := pwm.wantPeers[c]; !ok { - // Increment the total wants gauge - pwm.wantGauge.Inc() - } - } - - if len(unsent) == 0 { - return - } - - // Allocate a single buffer to filter broadcast wants for each peer - bcstWantsBuffer := make([]cid.Cid, 0, len(unsent)) - - // Send broadcast wants to each peer - for _, pws := range pwm.peerWants { - peerUnsent := bcstWantsBuffer[:0] - for _, c := range unsent { - // If we've already sent a want to this peer, skip them. - if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { - peerUnsent = append(peerUnsent, c) - } - } - - if len(peerUnsent) > 0 { - pws.peerQueue.AddBroadcastWantHaves(peerUnsent) - } - } -} - -// sendWants only sends the peer the want-blocks and want-haves that have not -// already been sent to it. -func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) - fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) - - // Get the existing want-blocks and want-haves for the peer - pws, ok := pwm.peerWants[p] - if !ok { - // In practice this should never happen - log.Errorf("sendWants() called with peer %s but peer not found in peerWantManager", string(p)) - return - } - - // Iterate over the requested want-blocks - for _, c := range wantBlocks { - // If the want-block hasn't been sent to the peer - if pws.wantBlocks.Has(c) { - continue - } - - // Increment the want gauges - peerCounts := pwm.wantPeerCounts(c) - if peerCounts.wantBlock == 0 { - pwm.wantBlockGauge.Inc() - } - if !peerCounts.wanted() { - pwm.wantGauge.Inc() - } - - // Make sure the CID is no longer recorded as a want-have - pws.wantHaves.Remove(c) - - // Record that the CID was sent as a want-block - pws.wantBlocks.Add(c) - - // Add the CID to the results - fltWantBlks = append(fltWantBlks, c) - - // Update the reverse index - pwm.reverseIndexAdd(c, p) - } - - // Iterate over the requested want-haves - for _, c := range wantHaves { - // If we've already broadcasted this want, don't bother with a - // want-have. 
- if pwm.broadcastWants.Has(c) { - continue - } - - // If the CID has not been sent as a want-block or want-have - if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { - // Increment the total wants gauge - peerCounts := pwm.wantPeerCounts(c) - if !peerCounts.wanted() { - pwm.wantGauge.Inc() - } - - // Record that the CID was sent as a want-have - pws.wantHaves.Add(c) - - // Add the CID to the results - fltWantHvs = append(fltWantHvs, c) - - // Update the reverse index - pwm.reverseIndexAdd(c, p) - } - } - - // Send the want-blocks and want-haves to the peer - pws.peerQueue.AddWants(fltWantBlks, fltWantHvs) -} - -// sendCancels sends a cancel to each peer to which a corresponding want was -// sent -func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { - if len(cancelKs) == 0 { - return - } - - // Record how many peers have a pending want-block and want-have for each - // key to be cancelled - peerCounts := make(map[cid.Cid]wantPeerCnts, len(cancelKs)) - for _, c := range cancelKs { - peerCounts[c] = pwm.wantPeerCounts(c) - } - - // Create a buffer to use for filtering cancels per peer, with the - // broadcast wants at the front of the buffer (broadcast wants are sent to - // all peers) - broadcastCancels := make([]cid.Cid, 0, len(cancelKs)) - for _, c := range cancelKs { - if pwm.broadcastWants.Has(c) { - broadcastCancels = append(broadcastCancels, c) - } - } - - // Send cancels to a particular peer - send := func(p peer.ID, pws *peerWant) { - // Start from the broadcast cancels - toCancel := broadcastCancels - - // For each key to be cancelled - for _, c := range cancelKs { - // Check if a want was sent for the key - if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { - continue - } - - // Unconditionally remove from the want lists. - pws.wantBlocks.Remove(c) - pws.wantHaves.Remove(c) - - // If it's a broadcast want, we've already added it to - // the peer cancels. - if !pwm.broadcastWants.Has(c) { - toCancel = append(toCancel, c) - } - } - - // Send cancels to the peer - if len(toCancel) > 0 { - pws.peerQueue.AddCancels(toCancel) - } - } - - if len(broadcastCancels) > 0 { - // If a broadcast want is being cancelled, send the cancel to all - // peers - for p, pws := range pwm.peerWants { - send(p, pws) - } - } else { - // Only send cancels to peers that received a corresponding want - cancelPeers := make(map[peer.ID]struct{}, len(pwm.wantPeers[cancelKs[0]])) - for _, c := range cancelKs { - for p := range pwm.wantPeers[c] { - cancelPeers[p] = struct{}{} - } - } - for p := range cancelPeers { - pws, ok := pwm.peerWants[p] - if !ok { - // Should never happen but check just in case - log.Errorf("sendCancels - peerWantManager index missing peer %s", p) - continue - } - - send(p, pws) - } - } - - // Decrement the wants gauges - for _, c := range cancelKs { - peerCnts := peerCounts[c] - - // If there were any peers that had a pending want-block for the key - if peerCnts.wantBlock > 0 { - // Decrement the want-block gauge - pwm.wantBlockGauge.Dec() - } - - // If there was a peer that had a pending want or it was a broadcast want - if peerCnts.wanted() { - // Decrement the total wants gauge - pwm.wantGauge.Dec() - } - } - - // Remove cancelled broadcast wants - for _, c := range broadcastCancels { - pwm.broadcastWants.Remove(c) - } - - // Batch-remove the reverse-index. There's no need to clear this index - // peer-by-peer. 
- for _, c := range cancelKs { - delete(pwm.wantPeers, c) - } -} - -// wantPeerCnts stores the number of peers that have pending wants for a CID -type wantPeerCnts struct { - // number of peers that have a pending want-block for the CID - wantBlock int - // number of peers that have a pending want-have for the CID - wantHave int - // whether the CID is a broadcast want - isBroadcast bool -} - -// wanted returns true if any peer wants the CID or it's a broadcast want -func (pwm *wantPeerCnts) wanted() bool { - return pwm.wantBlock > 0 || pwm.wantHave > 0 || pwm.isBroadcast -} - -// wantPeerCounts counts how many peers have a pending want-block and want-have -// for the given CID -func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts { - blockCount := 0 - haveCount := 0 - for p := range pwm.wantPeers[c] { - pws, ok := pwm.peerWants[p] - if !ok { - log.Errorf("reverse index has extra peer %s for key %s in peerWantManager", string(p), c) - continue - } - - if pws.wantBlocks.Has(c) { - blockCount++ - } else if pws.wantHaves.Has(c) { - haveCount++ - } - } - - return wantPeerCnts{blockCount, haveCount, pwm.broadcastWants.Has(c)} -} - -// Add the peer to the list of peers that have sent a want with the cid -func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool { - peers, ok := pwm.wantPeers[c] - if !ok { - peers = make(map[peer.ID]struct{}, 10) - pwm.wantPeers[c] = peers - } - peers[p] = struct{}{} - return !ok -} - -// Remove the peer from the list of peers that have sent a want with the cid -func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { - if peers, ok := pwm.wantPeers[c]; ok { - delete(peers, p) - if len(peers) == 0 { - delete(pwm.wantPeers, c) - } - } -} - -// GetWantBlocks returns the set of all want-blocks sent to all peers -func (pwm *peerWantManager) getWantBlocks() []cid.Cid { - res := cid.NewSet() - - // Iterate over all known peers - for _, pws := range pwm.peerWants { - // Iterate over all want-blocks - _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { - // Add the CID to the results - res.Add(c) - return nil - }) - } - - return res.Keys() -} - -// GetWantHaves returns the set of all want-haves sent to all peers -func (pwm *peerWantManager) getWantHaves() []cid.Cid { - res := cid.NewSet() - - // Iterate over all peers with active wants. - for _, pws := range pwm.peerWants { - // Iterate over all want-haves - _ = pws.wantHaves.ForEach(func(c cid.Cid) error { - // Add the CID to the results - res.Add(c) - return nil - }) - } - _ = pwm.broadcastWants.ForEach(func(c cid.Cid) error { - res.Add(c) - return nil - }) - - return res.Keys() -} - -// GetWants returns the set of all wants (both want-blocks and want-haves). -func (pwm *peerWantManager) getWants() []cid.Cid { - res := pwm.broadcastWants.Keys() - - // Iterate over all targeted wants, removing ones that are also in the - // broadcast list. 
- for c := range pwm.wantPeers { - if pwm.broadcastWants.Has(c) { - continue - } - res = append(res, c) - } - - return res -} - -func (pwm *peerWantManager) String() string { - var b bytes.Buffer - for p, ws := range pwm.peerWants { - b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) - for _, c := range ws.wantHaves.Keys() { - b.WriteString(fmt.Sprintf(" want-have %s\n", c)) - } - for _, c := range ws.wantBlocks.Keys() { - b.WriteString(fmt.Sprintf(" want-block %s\n", c)) - } - } - return b.String() -} diff --git a/client/internal/peermanager/peerwantmanager_test.go b/client/internal/peermanager/peerwantmanager_test.go deleted file mode 100644 index fdc223d1..00000000 --- a/client/internal/peermanager/peerwantmanager_test.go +++ /dev/null @@ -1,516 +0,0 @@ -package peermanager - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -type gauge struct { - count int -} - -func (g *gauge) Inc() { - g.count++ -} -func (g *gauge) Dec() { - g.count-- -} - -type mockPQ struct { - bcst []cid.Cid - wbs []cid.Cid - whs []cid.Cid - cancels []cid.Cid -} - -func (mpq *mockPQ) clear() { - mpq.bcst = nil - mpq.wbs = nil - mpq.whs = nil - mpq.cancels = nil -} - -func (mpq *mockPQ) Startup() {} -func (mpq *mockPQ) Shutdown() {} - -func (mpq *mockPQ) AddBroadcastWantHaves(whs []cid.Cid) { - mpq.bcst = append(mpq.bcst, whs...) -} -func (mpq *mockPQ) AddWants(wbs []cid.Cid, whs []cid.Cid) { - mpq.wbs = append(mpq.wbs, wbs...) - mpq.whs = append(mpq.whs, whs...) -} -func (mpq *mockPQ) AddCancels(cs []cid.Cid) { - mpq.cancels = append(mpq.cancels, cs...) -} -func (mpq *mockPQ) ResponseReceived(ks []cid.Cid) { -} - -func clearSent(pqs map[peer.ID]PeerQueue) { - for _, pqi := range pqs { - pqi.(*mockPQ).clear() - } -} - -func TestEmpty(t *testing.T) { - pwm := newPeerWantManager(&gauge{}, &gauge{}) - - if len(pwm.getWantBlocks()) > 0 { - t.Fatal("Expected GetWantBlocks() to have length 0") - } - if len(pwm.getWantHaves()) > 0 { - t.Fatal("Expected GetWantHaves() to have length 0") - } -} - -func TestPWMBroadcastWantHaves(t *testing.T) { - pwm := newPeerWantManager(&gauge{}, &gauge{}) - - peers := testutil.GeneratePeers(3) - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - cids3 := testutil.GenerateCids(2) - - peerQueues := make(map[peer.ID]PeerQueue) - for _, p := range peers[:2] { - pq := &mockPQ{} - peerQueues[p] = pq - pwm.addPeer(pq, p) - if len(pq.bcst) > 0 { - t.Errorf("expected no broadcast wants") - } - } - - // Broadcast 2 cids to 2 peers - pwm.broadcastWantHaves(cids) - for _, pqi := range peerQueues { - pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids) { - t.Fatal("Expected all cids to be broadcast") - } - } - - // Broadcasting same cids should have no effect - clearSent(peerQueues) - pwm.broadcastWantHaves(cids) - for _, pqi := range peerQueues { - pq := pqi.(*mockPQ) - if len(pq.bcst) != 0 { - t.Fatal("Expected 0 want-haves") - } - } - - // Broadcast 2 other cids - clearSent(peerQueues) - pwm.broadcastWantHaves(cids2) - for _, pqi := range peerQueues { - pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids2) { - t.Fatal("Expected all new cids to be broadcast") - } - } - - // Broadcast mix of old and new cids - clearSent(peerQueues) - 
pwm.broadcastWantHaves(append(cids, cids3...)) - for _, pqi := range peerQueues { - pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - // Only new cids should be broadcast - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids3) { - t.Fatal("Expected all new cids to be broadcast") - } - } - - // Sending want-block for a cid should prevent broadcast to that peer - clearSent(peerQueues) - cids4 := testutil.GenerateCids(4) - wantBlocks := []cid.Cid{cids4[0], cids4[2]} - p0 := peers[0] - p1 := peers[1] - pwm.sendWants(p0, wantBlocks, []cid.Cid{}) - - pwm.broadcastWantHaves(cids4) - pq0 := peerQueues[p0].(*mockPQ) - if len(pq0.bcst) != 2 { // only broadcast 2 / 4 want-haves - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq0.bcst, []cid.Cid{cids4[1], cids4[3]}) { - t.Fatalf("Expected unsent cids to be broadcast") - } - pq1 := peerQueues[p1].(*mockPQ) - if len(pq1.bcst) != 4 { // broadcast all 4 want-haves - t.Fatal("Expected 4 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq1.bcst, cids4) { - t.Fatal("Expected all cids to be broadcast") - } - - allCids := cids - allCids = append(allCids, cids2...) - allCids = append(allCids, cids3...) - allCids = append(allCids, cids4...) - - // Add another peer - peer2 := peers[2] - pq2 := &mockPQ{} - peerQueues[peer2] = pq2 - pwm.addPeer(pq2, peer2) - if !testutil.MatchKeysIgnoreOrder(pq2.bcst, allCids) { - t.Fatalf("Expected all cids to be broadcast.") - } - - clearSent(peerQueues) - pwm.broadcastWantHaves(allCids) - if len(pq2.bcst) != 0 { - t.Errorf("did not expect to have CIDs to broadcast") - } -} - -func TestPWMSendWants(t *testing.T) { - pwm := newPeerWantManager(&gauge{}, &gauge{}) - - peers := testutil.GeneratePeers(2) - p0 := peers[0] - p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - - peerQueues := make(map[peer.ID]PeerQueue) - for _, p := range peers[:2] { - pq := &mockPQ{} - peerQueues[p] = pq - pwm.addPeer(pq, p) - } - pq0 := peerQueues[p0].(*mockPQ) - pq1 := peerQueues[p1].(*mockPQ) - - // Send 2 want-blocks and 2 want-haves to p0 - clearSent(peerQueues) - pwm.sendWants(p0, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids2) { - t.Fatal("Expected 2 want-haves") - } - - // Send to p0 - // - 1 old want-block and 2 new want-blocks - // - 1 old want-have and 2 new want-haves - clearSent(peerQueues) - cids3 := testutil.GenerateCids(2) - cids4 := testutil.GenerateCids(2) - pwm.sendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids3) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids4) { - t.Fatal("Expected 2 want-haves") - } - - // Send to p0 as want-blocks: 1 new want-block, 1 old want-have - clearSent(peerQueues) - cids5 := testutil.GenerateCids(1) - newWantBlockOldWantHave := append(cids5, cids2[0]) - pwm.sendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) - // If a want was sent as a want-have, it should be ok to now send it as a - // want-block - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, newWantBlockOldWantHave) { - t.Fatal("Expected 2 want-blocks") - } - if len(pq0.whs) != 0 { - t.Fatal("Expected 0 want-haves") - } - - // Send to p0 as want-haves: 1 new want-have, 1 old want-block - clearSent(peerQueues) - cids6 := testutil.GenerateCids(1) - newWantHaveOldWantBlock := append(cids6, cids[0]) - pwm.sendWants(p0, []cid.Cid{}, 
newWantHaveOldWantBlock) - // If a want was previously sent as a want-block, it should not be - // possible to now send it as a want-have - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids6) { - t.Fatal("Expected 1 want-have") - } - if len(pq0.wbs) != 0 { - t.Fatal("Expected 0 want-blocks") - } - - // Send 2 want-blocks and 2 want-haves to p1 - pwm.sendWants(p1, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(pq1.wbs, cids) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq1.whs, cids2) { - t.Fatal("Expected 2 want-haves") - } -} - -func TestPWMSendCancels(t *testing.T) { - pwm := newPeerWantManager(&gauge{}, &gauge{}) - - peers := testutil.GeneratePeers(2) - p0 := peers[0] - p1 := peers[1] - wb1 := testutil.GenerateCids(2) - wh1 := testutil.GenerateCids(2) - wb2 := testutil.GenerateCids(2) - wh2 := testutil.GenerateCids(2) - allwb := append(wb1, wb2...) - allwh := append(wh1, wh2...) - - peerQueues := make(map[peer.ID]PeerQueue) - for _, p := range peers[:2] { - pq := &mockPQ{} - peerQueues[p] = pq - pwm.addPeer(pq, p) - } - pq0 := peerQueues[p0].(*mockPQ) - pq1 := peerQueues[p1].(*mockPQ) - - // Send 2 want-blocks and 2 want-haves to p0 - pwm.sendWants(p0, wb1, wh1) - // Send 3 want-blocks and 3 want-haves to p1 - // (1 overlapping want-block / want-have with p0) - pwm.sendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) - - if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { - t.Fatal("Expected 4 cids to be wanted") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), allwh) { - t.Fatal("Expected 4 cids to be wanted") - } - - // Cancel 1 want-block and 1 want-have that were sent to p0 - clearSent(peerQueues) - pwm.sendCancels([]cid.Cid{wb1[0], wh1[0]}) - // Should cancel the want-block and want-have - if len(pq1.cancels) != 0 { - t.Fatal("Expected no cancels sent to p1") - } - if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[0], wh1[0]}) { - t.Fatal("Expected 2 cids to be cancelled") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { - t.Fatal("Expected 3 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), append(wh2, wh1[1])) { - t.Fatal("Expected 3 want-haves") - } - - // Cancel everything - clearSent(peerQueues) - allCids := append(allwb, allwh...) - pwm.sendCancels(allCids) - // Should cancel the remaining want-blocks and want-haves for p0 - if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[1], wh1[1]}) { - t.Fatal("Expected un-cancelled cids to be cancelled") - } - - // Should cancel the remaining want-blocks and want-haves for p1 - remainingP1 := append(wb2, wh2...) 
- remainingP1 = append(remainingP1, wb1[1], wh1[1]) - if len(pq1.cancels) != len(remainingP1) { - t.Fatal("mismatch", len(pq1.cancels), len(remainingP1)) - } - if !testutil.MatchKeysIgnoreOrder(pq1.cancels, remainingP1) { - t.Fatal("Expected un-cancelled cids to be cancelled") - } - if len(pwm.getWantBlocks()) != 0 { - t.Fatal("Expected 0 want-blocks") - } - if len(pwm.getWantHaves()) != 0 { - t.Fatal("Expected 0 want-haves") - } -} - -func TestStats(t *testing.T) { - g := &gauge{} - wbg := &gauge{} - pwm := newPeerWantManager(g, wbg) - - peers := testutil.GeneratePeers(2) - p0 := peers[0] - p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - - peerQueues := make(map[peer.ID]PeerQueue) - pq := &mockPQ{} - peerQueues[p0] = pq - pwm.addPeer(pq, p0) - - // Send 2 want-blocks and 2 want-haves to p0 - pwm.sendWants(p0, cids, cids2) - - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } - - // Send 1 old want-block and 2 new want-blocks to p0 - cids3 := testutil.GenerateCids(2) - pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) - - if g.count != 6 { - t.Fatal("Expected 6 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } - - // Broadcast 1 old want-have and 2 new want-haves - cids4 := testutil.GenerateCids(2) - pwm.broadcastWantHaves(append(cids4, cids2[0])) - if g.count != 8 { - t.Fatal("Expected 8 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } - - // Add a second peer - pwm.addPeer(pq, p1) - - if g.count != 8 { - t.Fatal("Expected 8 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } - - // Cancel 1 want-block that was sent to p0 - // and 1 want-block that was not sent - cids5 := testutil.GenerateCids(1) - pwm.sendCancels(append(cids5, cids[0])) - - if g.count != 7 { - t.Fatal("Expected 7 wants") - } - if wbg.count != 3 { - t.Fatal("Expected 3 want-blocks") - } - - // Remove first peer - pwm.removePeer(p0) - - // Should still have 3 broadcast wants - if g.count != 3 { - t.Fatal("Expected 3 wants") - } - if wbg.count != 0 { - t.Fatal("Expected all want-blocks to be removed") - } - - // Remove second peer - pwm.removePeer(p1) - - // Should still have 3 broadcast wants - if g.count != 3 { - t.Fatal("Expected 3 wants") - } - if wbg.count != 0 { - t.Fatal("Expected 0 want-blocks") - } - - // Cancel one remaining broadcast want-have - pwm.sendCancels(cids2[:1]) - if g.count != 2 { - t.Fatal("Expected 2 wants") - } - if wbg.count != 0 { - t.Fatal("Expected 0 want-blocks") - } -} - -func TestStatsOverlappingWantBlockWantHave(t *testing.T) { - g := &gauge{} - wbg := &gauge{} - pwm := newPeerWantManager(g, wbg) - - peers := testutil.GeneratePeers(2) - p0 := peers[0] - p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - - pwm.addPeer(&mockPQ{}, p0) - pwm.addPeer(&mockPQ{}, p1) - - // Send 2 want-blocks and 2 want-haves to p0 - pwm.sendWants(p0, cids, cids2) - - // Send opposite: - // 2 want-haves and 2 want-blocks to p1 - pwm.sendWants(p1, cids2, cids) - - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } - - // Cancel 1 of each group of cids - pwm.sendCancels([]cid.Cid{cids[0], cids2[0]}) - - if g.count != 2 { - t.Fatal("Expected 2 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } -} - -func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { - g := &gauge{} - wbg := &gauge{} - pwm := 
newPeerWantManager(g, wbg) - - peers := testutil.GeneratePeers(2) - p0 := peers[0] - p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - - pwm.addPeer(&mockPQ{}, p0) - pwm.addPeer(&mockPQ{}, p1) - - // Send 2 want-blocks and 2 want-haves to p0 - pwm.sendWants(p0, cids, cids2) - - // Send opposite: - // 2 want-haves and 2 want-blocks to p1 - pwm.sendWants(p1, cids2, cids) - - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } - - // Remove p0 - pwm.removePeer(p0) - - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } -} diff --git a/client/internal/providerquerymanager/providerquerymanager.go b/client/internal/providerquerymanager/providerquerymanager.go deleted file mode 100644 index 9ef2e5fd..00000000 --- a/client/internal/providerquerymanager/providerquerymanager.go +++ /dev/null @@ -1,430 +0,0 @@ -package providerquerymanager - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var log = logging.Logger("bitswap") - -const ( - maxProviders = 10 - maxInProcessRequests = 6 - defaultTimeout = 10 * time.Second -) - -type inProgressRequestStatus struct { - ctx context.Context - cancelFn func() - providersSoFar []peer.ID - listeners map[chan peer.ID]struct{} -} - -type findProviderRequest struct { - k cid.Cid - ctx context.Context -} - -// ProviderQueryNetwork is an interface for finding providers and connecting to -// peers. -type ProviderQueryNetwork interface { - ConnectTo(context.Context, peer.ID) error - FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID -} - -type providerQueryMessage interface { - debugMessage() string - handle(pqm *ProviderQueryManager) -} - -type receivedProviderMessage struct { - ctx context.Context - k cid.Cid - p peer.ID -} - -type finishedProviderQueryMessage struct { - ctx context.Context - k cid.Cid -} - -type newProvideQueryMessage struct { - ctx context.Context - k cid.Cid - inProgressRequestChan chan<- inProgressRequest -} - -type cancelRequestMessage struct { - incomingProviders chan peer.ID - k cid.Cid -} - -// ProviderQueryManager manages requests to find more providers for blocks -// for bitswap sessions. It's main goals are to: -// - rate limit requests -- don't have too many find provider calls running -// simultaneously -// - connect to found peers and filter them if it can't connect -// - ensure two findprovider calls for the same block don't run concurrently -// - manage timeouts -type ProviderQueryManager struct { - ctx context.Context - network ProviderQueryNetwork - providerQueryMessages chan providerQueryMessage - providerRequestsProcessing chan *findProviderRequest - incomingFindProviderRequests chan *findProviderRequest - - findProviderTimeout time.Duration - timeoutMutex sync.RWMutex - - // do not touch outside the run loop - inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus -} - -// New initializes a new ProviderQueryManager for a given context and a given -// network provider. 
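Because the ProviderQueryManager only talks to the network through the small ProviderQueryNetwork interface above, it can be exercised with a canned implementation. A minimal sketch (editor's illustration, not from the deleted file), assuming it lives in this package; stubNetwork and its fixed provider list are invented for illustration.

package providerquerymanager

import (
	"context"

	cid "github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// stubNetwork satisfies ProviderQueryNetwork with a fixed provider set and
// always-successful connections.
type stubNetwork struct {
	providers []peer.ID
}

func (n *stubNetwork) ConnectTo(ctx context.Context, p peer.ID) error { return nil }

func (n *stubNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID {
	out := make(chan peer.ID)
	go func() {
		defer close(out)
		for i, p := range n.providers {
			if i >= max {
				return
			}
			select {
			case out <- p:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}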
-func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager { - return &ProviderQueryManager{ - ctx: ctx, - network: network, - providerQueryMessages: make(chan providerQueryMessage, 16), - providerRequestsProcessing: make(chan *findProviderRequest), - incomingFindProviderRequests: make(chan *findProviderRequest), - inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), - findProviderTimeout: defaultTimeout, - } -} - -// Startup starts processing for the ProviderQueryManager. -func (pqm *ProviderQueryManager) Startup() { - go pqm.run() -} - -type inProgressRequest struct { - providersSoFar []peer.ID - incoming chan peer.ID -} - -// SetFindProviderTimeout changes the timeout for finding providers -func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) { - pqm.timeoutMutex.Lock() - pqm.findProviderTimeout = findProviderTimeout - pqm.timeoutMutex.Unlock() -} - -// FindProvidersAsync finds providers for the given block. -func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID { - inProgressRequestChan := make(chan inProgressRequest) - - select { - case pqm.providerQueryMessages <- &newProvideQueryMessage{ - ctx: sessionCtx, - k: k, - inProgressRequestChan: inProgressRequestChan, - }: - case <-pqm.ctx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch - case <-sessionCtx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch - } - - // DO NOT select on sessionCtx. We only want to abort here if we're - // shutting down because we can't actually _cancel_ the request till we - // get to receiveProviders. - var receivedInProgressRequest inProgressRequest - select { - case <-pqm.ctx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch - case receivedInProgressRequest = <-inProgressRequestChan: - } - - return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest) -} - -func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID { - // maintains an unbuffered queue for incoming providers for given request for a given session - // essentially, as a provider comes in, for a given CID, we want to immediately broadcast to all - // sessions that queried that CID, without worrying about whether the client code is actually - // reading from the returned channel -- so that the broadcast never blocks - // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd - returnedProviders := make(chan peer.ID) - receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar[0:]...) 
- incomingProviders := receivedInProgressRequest.incoming - - go func() { - defer close(returnedProviders) - outgoingProviders := func() chan<- peer.ID { - if len(receivedProviders) == 0 { - return nil - } - return returnedProviders - } - nextProvider := func() peer.ID { - if len(receivedProviders) == 0 { - return "" - } - return receivedProviders[0] - } - for len(receivedProviders) > 0 || incomingProviders != nil { - select { - case <-pqm.ctx.Done(): - return - case <-sessionCtx.Done(): - if incomingProviders != nil { - pqm.cancelProviderRequest(k, incomingProviders) - } - return - case provider, ok := <-incomingProviders: - if !ok { - incomingProviders = nil - } else { - receivedProviders = append(receivedProviders, provider) - } - case outgoingProviders() <- nextProvider(): - receivedProviders = receivedProviders[1:] - } - } - }() - return returnedProviders -} - -func (pqm *ProviderQueryManager) cancelProviderRequest(k cid.Cid, incomingProviders chan peer.ID) { - cancelMessageChannel := pqm.providerQueryMessages - for { - select { - case cancelMessageChannel <- &cancelRequestMessage{ - incomingProviders: incomingProviders, - k: k, - }: - cancelMessageChannel = nil - // clear out any remaining providers, in case and "incoming provider" - // messages get processed before our cancel message - case _, ok := <-incomingProviders: - if !ok { - return - } - case <-pqm.ctx.Done(): - return - } - } -} - -func (pqm *ProviderQueryManager) findProviderWorker() { - // findProviderWorker just cycles through incoming provider queries one - // at a time. We have six of these workers running at once - // to let requests go in parallel but keep them rate limited - for { - select { - case fpr, ok := <-pqm.providerRequestsProcessing: - if !ok { - return - } - k := fpr.k - log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) - pqm.timeoutMutex.RLock() - findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) - pqm.timeoutMutex.RUnlock() - providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) - wg := &sync.WaitGroup{} - for p := range providers { - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - err := pqm.network.ConnectTo(findProviderCtx, p) - if err != nil { - log.Debugf("failed to connect to provider %s: %s", p, err) - return - } - select { - case pqm.providerQueryMessages <- &receivedProviderMessage{ - ctx: findProviderCtx, - k: k, - p: p, - }: - case <-pqm.ctx.Done(): - return - } - }(p) - } - wg.Wait() - cancel() - select { - case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ - ctx: findProviderCtx, - k: k, - }: - case <-pqm.ctx.Done(): - } - case <-pqm.ctx.Done(): - return - } - } -} - -func (pqm *ProviderQueryManager) providerRequestBufferWorker() { - // the provider request buffer worker just maintains an unbounded - // buffer for incoming provider queries and dispatches to the find - // provider workers as they become available - // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd - var providerQueryRequestBuffer []*findProviderRequest - nextProviderQuery := func() *findProviderRequest { - if len(providerQueryRequestBuffer) == 0 { - return nil - } - return providerQueryRequestBuffer[0] - } - outgoingRequests := func() chan<- *findProviderRequest { - if len(providerQueryRequestBuffer) == 0 { - return nil - } - return pqm.providerRequestsProcessing - } - - for { - select { - case incomingRequest, ok := <-pqm.incomingFindProviderRequests: - if !ok { - return - } - 
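			// Append to an in-memory slice rather than forwarding directly:
			// the incoming channel is drained promptly, while the head of
			// the slice is only offered to the rate-limited
			// providerRequestsProcessing channel when the buffer is
			// non-empty (outgoingRequests returns nil otherwise).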
providerQueryRequestBuffer = append(providerQueryRequestBuffer, incomingRequest) - case outgoingRequests() <- nextProviderQuery(): - providerQueryRequestBuffer = providerQueryRequestBuffer[1:] - case <-pqm.ctx.Done(): - return - } - } -} - -func (pqm *ProviderQueryManager) cleanupInProcessRequests() { - for _, requestStatus := range pqm.inProgressRequestStatuses { - for listener := range requestStatus.listeners { - close(listener) - } - requestStatus.cancelFn() - } -} - -func (pqm *ProviderQueryManager) run() { - defer pqm.cleanupInProcessRequests() - - go pqm.providerRequestBufferWorker() - for i := 0; i < maxInProcessRequests; i++ { - go pqm.findProviderWorker() - } - - for { - select { - case nextMessage := <-pqm.providerQueryMessages: - log.Debug(nextMessage.debugMessage()) - nextMessage.handle(pqm) - case <-pqm.ctx.Done(): - return - } - } -} - -func (rpm *receivedProviderMessage) debugMessage() string { - return fmt.Sprintf("Received provider (%s) for cid (%s)", rpm.p.String(), rpm.k.String()) -} - -func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] - if !ok { - log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) - return - } - requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) - for listener := range requestStatus.listeners { - select { - case listener <- rpm.p: - case <-pqm.ctx.Done(): - return - } - } -} - -func (fpqm *finishedProviderQueryMessage) debugMessage() string { - return fmt.Sprintf("Finished Provider Query on cid: %s", fpqm.k.String()) -} - -func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] - if !ok { - // we canceled the request as it finished. - return - } - for listener := range requestStatus.listeners { - close(listener) - } - delete(pqm.inProgressRequestStatuses, fpqm.k) - requestStatus.cancelFn() -} - -func (npqm *newProvideQueryMessage) debugMessage() string { - return fmt.Sprintf("New Provider Query on cid: %s", npqm.k.String()) -} - -func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] - if !ok { - - ctx, cancelFn := context.WithCancel(pqm.ctx) - requestStatus = &inProgressRequestStatus{ - listeners: make(map[chan peer.ID]struct{}), - ctx: ctx, - cancelFn: cancelFn, - } - pqm.inProgressRequestStatuses[npqm.k] = requestStatus - select { - case pqm.incomingFindProviderRequests <- &findProviderRequest{ - k: npqm.k, - ctx: ctx, - }: - case <-pqm.ctx.Done(): - return - } - } - inProgressChan := make(chan peer.ID) - requestStatus.listeners[inProgressChan] = struct{}{} - select { - case npqm.inProgressRequestChan <- inProgressRequest{ - providersSoFar: requestStatus.providersSoFar, - incoming: inProgressChan, - }: - case <-pqm.ctx.Done(): - } -} - -func (crm *cancelRequestMessage) debugMessage() string { - return fmt.Sprintf("Cancel provider query on cid: %s", crm.k.String()) -} - -func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] - if !ok { - // Request finished while queued. - return - } - _, ok = requestStatus.listeners[crm.incomingProviders] - if !ok { - // Request finished and _restarted_ while queued. 
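		// (A later query for the same CID created a fresh status whose
		// listeners do not include this channel, so there is nothing to
		// remove here.)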
- return - } - delete(requestStatus.listeners, crm.incomingProviders) - close(crm.incomingProviders) - if len(requestStatus.listeners) == 0 { - delete(pqm.inProgressRequestStatuses, crm.k) - requestStatus.cancelFn() - } -} diff --git a/client/internal/providerquerymanager/providerquerymanager_test.go b/client/internal/providerquerymanager/providerquerymanager_test.go deleted file mode 100644 index 2ca2ffaf..00000000 --- a/client/internal/providerquerymanager/providerquerymanager_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package providerquerymanager - -import ( - "context" - "errors" - "reflect" - "sync" - "testing" - "time" - - "github.com/ipfs/go-bitswap/internal/testutil" - - cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" -) - -type fakeProviderNetwork struct { - peersFound []peer.ID - connectError error - delay time.Duration - connectDelay time.Duration - queriesMadeMutex sync.RWMutex - queriesMade int - liveQueries int -} - -func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { - time.Sleep(fpn.connectDelay) - return fpn.connectError -} - -func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - fpn.queriesMadeMutex.Lock() - fpn.queriesMade++ - fpn.liveQueries++ - fpn.queriesMadeMutex.Unlock() - incomingPeers := make(chan peer.ID) - go func() { - defer close(incomingPeers) - for _, p := range fpn.peersFound { - time.Sleep(fpn.delay) - select { - case <-ctx.Done(): - return - default: - } - select { - case incomingPeers <- p: - case <-ctx.Done(): - return - } - } - fpn.queriesMadeMutex.Lock() - fpn.liveQueries-- - fpn.queriesMadeMutex.Unlock() - }() - - return incomingPeers -} - -func TestNormalSimultaneousFetch(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 1 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - keys := testutil.GenerateCids(2) - - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1]) - - var firstPeersReceived []peer.ID - for p := range firstRequestChan { - firstPeersReceived = append(firstPeersReceived, p) - } - - var secondPeersReceived []peer.ID - for p := range secondRequestChan { - secondPeersReceived = append(secondPeersReceived, p) - } - - if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { - t.Fatal("Did not collect all peers for request that was completed") - } - - fpn.queriesMadeMutex.Lock() - defer fpn.queriesMadeMutex.Unlock() - if fpn.queriesMade != 2 { - t.Fatal("Did not dedup provider requests running simultaneously") - } - -} - -func TestDedupingProviderRequests(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 1 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] - - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - - var firstPeersReceived []peer.ID - for p := range firstRequestChan { - firstPeersReceived = 
append(firstPeersReceived, p) - } - - var secondPeersReceived []peer.ID - for p := range secondRequestChan { - secondPeersReceived = append(secondPeersReceived, p) - } - - if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { - t.Fatal("Did not collect all peers for request that was completed") - } - - if !reflect.DeepEqual(firstPeersReceived, secondPeersReceived) { - t.Fatal("Did not receive the same response to both find provider requests") - } - fpn.queriesMadeMutex.Lock() - defer fpn.queriesMadeMutex.Unlock() - if fpn.queriesMade != 1 { - t.Fatal("Did not dedup provider requests running simultaneously") - } -} - -func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 1 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - - key := testutil.GenerateCids(1)[0] - - // first session will cancel before done - firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) - defer firstCancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key) - secondSessionCtx, secondCancel := context.WithTimeout(ctx, 5*time.Second) - defer secondCancel() - secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key) - - var firstPeersReceived []peer.ID - for p := range firstRequestChan { - firstPeersReceived = append(firstPeersReceived, p) - } - - var secondPeersReceived []peer.ID - for p := range secondRequestChan { - secondPeersReceived = append(secondPeersReceived, p) - } - - if len(secondPeersReceived) != len(peers) { - t.Fatal("Did not collect all peers for request that was completed") - } - - if len(firstPeersReceived) >= len(peers) { - t.Fatal("Collected all peers on cancelled peer, should have been cancelled immediately") - } - fpn.queriesMadeMutex.Lock() - defer fpn.queriesMadeMutex.Unlock() - if fpn.queriesMade != 1 { - t.Fatal("Did not dedup provider requests running simultaneously") - } -} - -func TestCancelManagerExitsGracefully(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 1 * time.Millisecond, - } - ctx := context.Background() - managerCtx, managerCancel := context.WithTimeout(ctx, 5*time.Millisecond) - defer managerCancel() - providerQueryManager := New(managerCtx, fpn) - providerQueryManager.Startup() - - key := testutil.GenerateCids(1)[0] - - sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) - defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - - var firstPeersReceived []peer.ID - for p := range firstRequestChan { - firstPeersReceived = append(firstPeersReceived, p) - } - - var secondPeersReceived []peer.ID - for p := range secondRequestChan { - secondPeersReceived = append(secondPeersReceived, p) - } - - if len(firstPeersReceived) >= len(peers) || - len(secondPeersReceived) >= len(peers) { - t.Fatal("Did not cancel requests in progress correctly") - } -} - -func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - connectError: errors.New("not able to connect"), - delay: 1 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - 
- key := testutil.GenerateCids(1)[0] - - sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) - defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - - var firstPeersReceived []peer.ID - for p := range firstRequestChan { - firstPeersReceived = append(firstPeersReceived, p) - } - - var secondPeersReceived []peer.ID - for p := range secondRequestChan { - secondPeersReceived = append(secondPeersReceived, p) - } - - if len(firstPeersReceived) != 0 || len(secondPeersReceived) != 0 { - t.Fatal("Did not filter out peers with connection issues") - } - -} - -func TestRateLimitingRequests(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 5 * time.Millisecond, - } - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - - keys := testutil.GenerateCids(maxInProcessRequests + 1) - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - var requestChannels []<-chan peer.ID - for i := 0; i < maxInProcessRequests+1; i++ { - requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i])) - } - time.Sleep(20 * time.Millisecond) - fpn.queriesMadeMutex.Lock() - if fpn.liveQueries != maxInProcessRequests { - t.Logf("Queries made: %d\n", fpn.liveQueries) - t.Fatal("Did not limit parallel requests to rate limit") - } - fpn.queriesMadeMutex.Unlock() - for i := 0; i < maxInProcessRequests+1; i++ { - for range requestChannels[i] { - } - } - - fpn.queriesMadeMutex.Lock() - defer fpn.queriesMadeMutex.Unlock() - if fpn.queriesMade != maxInProcessRequests+1 { - t.Logf("Queries made: %d\n", fpn.queriesMade) - t.Fatal("Did not make all seperate requests") - } -} - -func TestFindProviderTimeout(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 10 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) - keys := testutil.GenerateCids(1) - - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) - var firstPeersReceived []peer.ID - for p := range firstRequestChan { - firstPeersReceived = append(firstPeersReceived, p) - } - if len(firstPeersReceived) >= len(peers) { - t.Fatal("Find provider request should have timed out, did not") - } -} - -func TestFindProviderPreCanceled(t *testing.T) { - peers := testutil.GeneratePeers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 1 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) - keys := testutil.GenerateCids(1) - - sessionCtx, cancel := context.WithCancel(ctx) - cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) - if firstRequestChan == nil { - t.Fatal("expected non-nil channel") - } - select { - case <-firstRequestChan: - case <-time.After(10 * time.Millisecond): - t.Fatal("shouldn't have blocked waiting on a closed context") - } -} - -func TestCancelFindProvidersAfterCompletion(t *testing.T) { - peers := 
testutil.GeneratePeers(2) - fpn := &fakeProviderNetwork{ - peersFound: peers, - delay: 1 * time.Millisecond, - } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) - keys := testutil.GenerateCids(1) - - sessionCtx, cancel := context.WithCancel(ctx) - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) - <-firstRequestChan // wait for everything to start. - time.Sleep(10 * time.Millisecond) // wait for the incoming providres to stop. - cancel() // cancel the context. - - timer := time.NewTimer(10 * time.Millisecond) - defer timer.Stop() - for { - select { - case _, ok := <-firstRequestChan: - if !ok { - return - } - case <-timer.C: - t.Fatal("should have finished receiving responses within timeout") - } - } -} diff --git a/client/internal/session/cidqueue.go b/client/internal/session/cidqueue.go deleted file mode 100644 index aedfa944..00000000 --- a/client/internal/session/cidqueue.go +++ /dev/null @@ -1,63 +0,0 @@ -package session - -import cid "github.com/ipfs/go-cid" - -type cidQueue struct { - elems []cid.Cid - eset *cid.Set -} - -func newCidQueue() *cidQueue { - return &cidQueue{eset: cid.NewSet()} -} - -func (cq *cidQueue) Pop() cid.Cid { - for { - if len(cq.elems) == 0 { - return cid.Cid{} - } - - out := cq.elems[0] - cq.elems = cq.elems[1:] - - if cq.eset.Has(out) { - cq.eset.Remove(out) - return out - } - } -} - -func (cq *cidQueue) Cids() []cid.Cid { - // Lazily delete from the list any cids that were removed from the set - if len(cq.elems) > cq.eset.Len() { - i := 0 - for _, c := range cq.elems { - if cq.eset.Has(c) { - cq.elems[i] = c - i++ - } - } - cq.elems = cq.elems[:i] - } - - // Make a copy of the cids - return append([]cid.Cid{}, cq.elems...) 
-} - -func (cq *cidQueue) Push(c cid.Cid) { - if cq.eset.Visit(c) { - cq.elems = append(cq.elems, c) - } -} - -func (cq *cidQueue) Remove(c cid.Cid) { - cq.eset.Remove(c) -} - -func (cq *cidQueue) Has(c cid.Cid) bool { - return cq.eset.Has(c) -} - -func (cq *cidQueue) Len() int { - return cq.eset.Len() -} diff --git a/client/internal/session/peerresponsetracker.go b/client/internal/session/peerresponsetracker.go deleted file mode 100644 index d81c3b02..00000000 --- a/client/internal/session/peerresponsetracker.go +++ /dev/null @@ -1,70 +0,0 @@ -package session - -import ( - "math/rand" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// peerResponseTracker keeps track of how many times each peer was the first -// to send us a block for a given CID (used to rank peers) -type peerResponseTracker struct { - firstResponder map[peer.ID]int -} - -func newPeerResponseTracker() *peerResponseTracker { - return &peerResponseTracker{ - firstResponder: make(map[peer.ID]int), - } -} - -// receivedBlockFrom is called when a block is received from a peer -// (only called first time block is received) -func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { - prt.firstResponder[from]++ -} - -// choose picks a peer from the list of candidate peers, favouring those peers -// that were first to send us previous blocks -func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { - if len(peers) == 0 { - return "" - } - - rnd := rand.Float64() - - // Find the total received blocks for all candidate peers - total := 0 - for _, p := range peers { - total += prt.getPeerCount(p) - } - - // Choose one of the peers with a chance proportional to the number - // of blocks received from that peer - counted := 0.0 - for _, p := range peers { - counted += float64(prt.getPeerCount(p)) / float64(total) - if counted > rnd { - return p - } - } - - // We shouldn't get here unless there is some weirdness with floating point - // math that doesn't quite cover the whole range of peers in the for loop - // so just choose the last peer. 
- index := len(peers) - 1 - return peers[index] -} - -// getPeerCount returns the number of times the peer was first to send us a -// block -func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { - count, ok := prt.firstResponder[p] - if ok { - return count - } - - // Make sure there is always at least a small chance a new peer - // will be chosen - return 1 -} diff --git a/client/internal/session/peerresponsetracker_test.go b/client/internal/session/peerresponsetracker_test.go deleted file mode 100644 index f1f58cd9..00000000 --- a/client/internal/session/peerresponsetracker_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package session - -import ( - "math" - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func TestPeerResponseTrackerInit(t *testing.T) { - peers := testutil.GeneratePeers(2) - prt := newPeerResponseTracker() - - if prt.choose([]peer.ID{}) != "" { - t.Fatal("expected empty peer ID") - } - if prt.choose([]peer.ID{peers[0]}) != peers[0] { - t.Fatal("expected single peer ID") - } - p := prt.choose(peers) - if p != peers[0] && p != peers[1] { - t.Fatal("expected randomly chosen peer") - } -} - -func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { - peers := testutil.GeneratePeers(4) - prt := newPeerResponseTracker() - - choices := []int{0, 0, 0, 0} - count := 1000 - for i := 0; i < count; i++ { - p := prt.choose(peers) - if p == peers[0] { - choices[0]++ - } else if p == peers[1] { - choices[1]++ - } else if p == peers[2] { - choices[2]++ - } else if p == peers[3] { - choices[3]++ - } - } - - for _, c := range choices { - if c == 0 { - t.Fatal("expected each peer to be chosen at least once") - } - if math.Abs(float64(c-choices[0])) > 0.2*float64(count) { - t.Fatal("expected unknown peers to have roughly equal chance of being chosen") - } - } -} - -func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { - peers := testutil.GeneratePeers(2) - prt := newPeerResponseTracker() - - prt.receivedBlockFrom(peers[0]) - - chooseFirst := 0 - chooseSecond := 0 - for i := 0; i < 1000; i++ { - p := prt.choose(peers) - if p == peers[0] { - chooseFirst++ - } else if p == peers[1] { - chooseSecond++ - } - } - - if chooseSecond == 0 { - t.Fatal("expected unknown peer to occasionally be chosen") - } - if chooseSecond > chooseFirst { - t.Fatal("expected known peer to be chosen more often") - } -} - -func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { - peers := testutil.GeneratePeers(3) - prt := newPeerResponseTracker() - - probabilities := []float64{0.1, 0.6, 0.3} - count := 1000 - for pi, prob := range probabilities { - for i := 0; float64(i) < float64(count)*prob; i++ { - prt.receivedBlockFrom(peers[pi]) - } - } - - var choices []int - for range probabilities { - choices = append(choices, 0) - } - - for i := 0; i < count; i++ { - p := prt.choose(peers) - if p == peers[0] { - choices[0]++ - } else if p == peers[1] { - choices[1]++ - } else if p == peers[2] { - choices[2]++ - } - } - - for i, c := range choices { - if c == 0 { - t.Fatal("expected each peer to be chosen at least once") - } - if math.Abs(float64(c)-(float64(count)*probabilities[i])) > 0.2*float64(count) { - t.Fatal("expected peers to be chosen proportionally to probability") - } - } -} diff --git a/client/internal/session/sentwantblockstracker.go b/client/internal/session/sentwantblockstracker.go deleted file mode 100644 index 0dfe0630..00000000 --- a/client/internal/session/sentwantblockstracker.go +++ /dev/null 
@@ -1,33 +0,0 @@ -package session - -import ( - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// sentWantBlocksTracker keeps track of which peers we've sent a want-block to -type sentWantBlocksTracker struct { - sentWantBlocks map[peer.ID]map[cid.Cid]struct{} -} - -func newSentWantBlocksTracker() *sentWantBlocksTracker { - return &sentWantBlocksTracker{ - sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), - } -} - -func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { - cids, ok := s.sentWantBlocks[p] - if !ok { - cids = make(map[cid.Cid]struct{}, len(ks)) - s.sentWantBlocks[p] = cids - } - for _, c := range ks { - cids[c] = struct{}{} - } -} - -func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { - _, ok := s.sentWantBlocks[p][c] - return ok -} diff --git a/client/internal/session/sentwantblockstracker_test.go b/client/internal/session/sentwantblockstracker_test.go deleted file mode 100644 index 2449840c..00000000 --- a/client/internal/session/sentwantblockstracker_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package session - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" -) - -func TestSendWantBlocksTracker(t *testing.T) { - peers := testutil.GeneratePeers(2) - cids := testutil.GenerateCids(2) - swbt := newSentWantBlocksTracker() - - if swbt.haveSentWantBlockTo(peers[0], cids[0]) { - t.Fatal("expected not to have sent anything yet") - } - - swbt.addSentWantBlocksTo(peers[0], cids) - if !swbt.haveSentWantBlockTo(peers[0], cids[0]) { - t.Fatal("expected to have sent cid to peer") - } - if !swbt.haveSentWantBlockTo(peers[0], cids[1]) { - t.Fatal("expected to have sent cid to peer") - } - if swbt.haveSentWantBlockTo(peers[1], cids[0]) { - t.Fatal("expected not to have sent cid to peer") - } -} diff --git a/client/internal/session/session.go b/client/internal/session/session.go deleted file mode 100644 index 51e787e2..00000000 --- a/client/internal/session/session.go +++ /dev/null @@ -1,508 +0,0 @@ -package session - -import ( - "context" - "time" - - "github.com/ipfs/go-bitswap/client/internal" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - delay "github.com/ipfs/go-ipfs-delay" - logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p/core/peer" - "go.uber.org/zap" -) - -var log = logging.Logger("bs:sess") -var sflog = log.Desugar() - -const ( - broadcastLiveWantsLimit = 64 -) - -// PeerManager keeps track of which sessions are interested in which peers -// and takes care of sending wants for the sessions -type PeerManager interface { - // RegisterSession tells the PeerManager that the session is interested - // in a peer's connection state - RegisterSession(peer.ID, bspm.Session) - // UnregisterSession tells the PeerManager that the session is no longer - // interested in a peer's connection state - UnregisterSession(uint64) - // SendWants tells the PeerManager to send wants to the given peer - SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) - // BroadcastWantHaves sends want-haves to all connected peers (used for - // session discovery) - 
BroadcastWantHaves(context.Context, []cid.Cid) - // SendCancels tells the PeerManager to send cancels to all peers - SendCancels(context.Context, []cid.Cid) -} - -// SessionManager manages all the sessions -type SessionManager interface { - // Remove a session (called when the session shuts down) - RemoveSession(sesid uint64) - // Cancel wants (called when a call to GetBlocks() is cancelled) - CancelSessionWants(sid uint64, wants []cid.Cid) -} - -// SessionPeerManager keeps track of peers in the session -type SessionPeerManager interface { - // PeersDiscovered indicates if any peers have been discovered yet - PeersDiscovered() bool - // Shutdown the SessionPeerManager - Shutdown() - // Adds a peer to the session, returning true if the peer is new - AddPeer(peer.ID) bool - // Removes a peer from the session, returning true if the peer existed - RemovePeer(peer.ID) bool - // All peers in the session - Peers() []peer.ID - // Whether there are any peers in the session - HasPeers() bool - // Protect connection from being pruned by the connection manager - ProtectConnection(peer.ID) -} - -// ProviderFinder is used to find providers for a given key -type ProviderFinder interface { - // FindProvidersAsync searches for peers that provide the given CID - FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID -} - -// opType is the kind of operation that is being processed by the event loop -type opType int - -const ( - // Receive blocks - opReceive opType = iota - // Want blocks - opWant - // Cancel wants - opCancel - // Broadcast want-haves - opBroadcast - // Wants sent to peers - opWantsSent -) - -type op struct { - op opType - keys []cid.Cid -} - -// Session holds state for an individual bitswap transfer operation. -// This allows bitswap to make smarter decisions about who to send wantlist -// info to, and who to request blocks from. -type Session struct { - // dependencies - ctx context.Context - shutdown func() - sm SessionManager - pm PeerManager - sprm SessionPeerManager - providerFinder ProviderFinder - sim *bssim.SessionInterestManager - - sw sessionWants - sws sessionWantSender - - latencyTrkr latencyTracker - - // channels - incoming chan op - tickDelayReqs chan time.Duration - - // do not touch outside run loop - idleTick *time.Timer - periodicSearchTimer *time.Timer - baseTickDelay time.Duration - consecutiveTicks int - initialSearchDelay time.Duration - periodicSearchDelay delay.D - // identifiers - notif notifications.PubSub - id uint64 - - self peer.ID -} - -// New creates a new bitswap session whose lifetime is bounded by the -// given context. 
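//
// The constructor also starts the session's run loop (which in turn runs the
// sessionWantSender). To tear a session down, either cancel the ctx passed to
// New or call Shutdown; both paths stop the run loop, shut down the
// SessionPeerManager and sessionWantSender, and finally call
// SessionManager.RemoveSession so the session can be cleaned up.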
-func New( - ctx context.Context, - sm SessionManager, - id uint64, - sprm SessionPeerManager, - providerFinder ProviderFinder, - sim *bssim.SessionInterestManager, - pm PeerManager, - bpm *bsbpm.BlockPresenceManager, - notif notifications.PubSub, - initialSearchDelay time.Duration, - periodicSearchDelay delay.D, - self peer.ID) *Session { - - ctx, cancel := context.WithCancel(ctx) - s := &Session{ - sw: newSessionWants(broadcastLiveWantsLimit), - tickDelayReqs: make(chan time.Duration), - ctx: ctx, - shutdown: cancel, - sm: sm, - pm: pm, - sprm: sprm, - providerFinder: providerFinder, - sim: sim, - incoming: make(chan op, 128), - latencyTrkr: latencyTracker{}, - notif: notif, - baseTickDelay: time.Millisecond * 500, - id: id, - initialSearchDelay: initialSearchDelay, - periodicSearchDelay: periodicSearchDelay, - self: self, - } - s.sws = newSessionWantSender(id, pm, sprm, sm, bpm, s.onWantsSent, s.onPeersExhausted) - - go s.run(ctx) - - return s -} - -func (s *Session) ID() uint64 { - return s.id -} - -func (s *Session) Shutdown() { - s.shutdown() -} - -// ReceiveFrom receives incoming blocks from the given peer. -func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { - // The SessionManager tells each Session about all keys that it may be - // interested in. Here the Session filters the keys to the ones that this - // particular Session is interested in. - interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) - ks = interestedRes[0] - haves = interestedRes[1] - dontHaves = interestedRes[2] - s.logReceiveFrom(from, ks, haves, dontHaves) - - // Inform the session want sender that a message has been received - s.sws.Update(from, ks, haves, dontHaves) - - if len(ks) == 0 { - return - } - - // Inform the session that blocks have been received - select { - case s.incoming <- op{op: opReceive, keys: ks}: - case <-s.ctx.Done(): - } -} - -func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { - // Save some CPU cycles if log level is higher than debug - if ce := sflog.Check(zap.DebugLevel, "Bitswap <- rcv message"); ce == nil { - return - } - - for _, c := range interestedKs { - log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) - } - for _, c := range haves { - log.Debugw("Bitswap <- HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) - } - for _, c := range dontHaves { - log.Debugw("Bitswap <- DONT_HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) - } -} - -// GetBlock fetches a single block. -func (s *Session) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "Session.GetBlock") - defer span.End() - return bsgetter.SyncGetBlock(ctx, k, s.GetBlocks) -} - -// GetBlocks fetches a set of blocks within the context of this session and -// returns a channel that found blocks will be returned on. No order is -// guaranteed on the returned blocks. 
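//
// A minimal consumption sketch, assuming "s" is an existing *Session and
// "keys" holds the CIDs to fetch (placeholder names):
//
//	ch, err := s.GetBlocks(ctx, keys)
//	if err != nil {
//		return err
//	}
//	for blk := range ch {
//		// process blk; the channel is closed when all blocks have arrived
//		// or when ctx / the session's context is cancelled
//	}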
-func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "Session.GetBlocks") - defer span.End() - - return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, - func(ctx context.Context, keys []cid.Cid) { - select { - case s.incoming <- op{op: opWant, keys: keys}: - case <-ctx.Done(): - case <-s.ctx.Done(): - } - }, - func(keys []cid.Cid) { - select { - case s.incoming <- op{op: opCancel, keys: keys}: - case <-s.ctx.Done(): - } - }, - ) -} - -// SetBaseTickDelay changes the rate at which ticks happen. -func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { - select { - case s.tickDelayReqs <- baseTickDelay: - case <-s.ctx.Done(): - } -} - -// onWantsSent is called when wants are sent to a peer by the session wants sender -func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) - s.nonBlockingEnqueue(op{op: opWantsSent, keys: allBlks}) -} - -// onPeersExhausted is called when all available peers have sent DONT_HAVE for -// a set of cids (or all peers become unavailable) -func (s *Session) onPeersExhausted(ks []cid.Cid) { - s.nonBlockingEnqueue(op{op: opBroadcast, keys: ks}) -} - -// We don't want to block the sessionWantSender if the incoming channel -// is full. So if we can't immediately send on the incoming channel spin -// it off into a go-routine. -func (s *Session) nonBlockingEnqueue(o op) { - select { - case s.incoming <- o: - default: - go func() { - select { - case s.incoming <- o: - case <-s.ctx.Done(): - } - }() - } -} - -// Session run loop -- everything in this function should not be called -// outside of this loop -func (s *Session) run(ctx context.Context) { - go s.sws.Run() - - s.idleTick = time.NewTimer(s.initialSearchDelay) - s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) - for { - select { - case oper := <-s.incoming: - switch oper.op { - case opReceive: - // Received blocks - s.handleReceive(oper.keys) - case opWant: - // Client wants blocks - s.wantBlocks(ctx, oper.keys) - case opCancel: - // Wants were cancelled - s.sw.CancelPending(oper.keys) - s.sws.Cancel(oper.keys) - case opWantsSent: - // Wants were sent to a peer - s.sw.WantsSent(oper.keys) - case opBroadcast: - // Broadcast want-haves to all peers - s.broadcast(ctx, oper.keys) - default: - panic("unhandled operation") - } - case <-s.idleTick.C: - // The session hasn't received blocks for a while, broadcast - s.broadcast(ctx, nil) - case <-s.periodicSearchTimer.C: - // Periodically search for a random live want - s.handlePeriodicSearch(ctx) - case baseTickDelay := <-s.tickDelayReqs: - // Set the base tick delay - s.baseTickDelay = baseTickDelay - case <-ctx.Done(): - // Shutdown - s.handleShutdown() - return - } - } -} - -// Called when the session hasn't received any blocks for some time, or when -// all peers in the session have sent DONT_HAVE for a particular set of CIDs. -// Send want-haves to all connected peers, and search for new peers with the CID. 
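//
// The interval between idle broadcasts is set by resetIdleTick below: once
// latency samples exist it is (baseTickDelay + 3*averageLatency) multiplied
// by (1 + consecutiveTicks). For example, with the default 500ms base delay
// and a 100ms average latency, consecutive idle broadcasts fire roughly
// 800ms, 1.6s and 2.4s apart.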
-func (s *Session) broadcast(ctx context.Context, wants []cid.Cid) { - // If this broadcast is because of an idle timeout (we haven't received - // any blocks for a while) then broadcast all pending wants - if wants == nil { - wants = s.sw.PrepareBroadcast() - } - - // Broadcast a want-have for the live wants to everyone we're connected to - s.broadcastWantHaves(ctx, wants) - - // do not find providers on consecutive ticks - // -- just rely on periodic search widening - if len(wants) > 0 && (s.consecutiveTicks == 0) { - // Search for providers who have the first want in the list. - // Typically if the provider has the first block they will have - // the rest of the blocks also. - log.Debugw("FindMorePeers", "session", s.id, "cid", wants[0], "pending", len(wants)) - s.findMorePeers(ctx, wants[0]) - } - s.resetIdleTick() - - // If we have live wants record a consecutive tick - if s.sw.HasLiveWants() { - s.consecutiveTicks++ - } -} - -// handlePeriodicSearch is called periodically to search for providers of a -// randomly chosen CID in the sesssion. -func (s *Session) handlePeriodicSearch(ctx context.Context) { - randomWant := s.sw.RandomLiveWant() - if !randomWant.Defined() { - return - } - - // TODO: come up with a better strategy for determining when to search - // for new providers for blocks. - s.findMorePeers(ctx, randomWant) - - s.broadcastWantHaves(ctx, []cid.Cid{randomWant}) - - s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) -} - -// findMorePeers attempts to find more peers for a session by searching for -// providers for the given Cid -func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) { - go func(k cid.Cid) { - for p := range s.providerFinder.FindProvidersAsync(ctx, k) { - // When a provider indicates that it has a cid, it's equivalent to - // the providing peer sending a HAVE - s.sws.Update(p, nil, []cid.Cid{c}, nil) - } - }(c) -} - -// handleShutdown is called when the session shuts down -func (s *Session) handleShutdown() { - // Stop the idle timer - s.idleTick.Stop() - // Shut down the session peer manager - s.sprm.Shutdown() - // Shut down the sessionWantSender (blocks until sessionWantSender stops - // sending) - s.sws.Shutdown() - // Signal to the SessionManager that the session has been shutdown - // and can be cleaned up - s.sm.RemoveSession(s.id) -} - -// handleReceive is called when the session receives blocks from a peer -func (s *Session) handleReceive(ks []cid.Cid) { - // Record which blocks have been received and figure out the total latency - // for fetching the blocks - wanted, totalLatency := s.sw.BlocksReceived(ks) - if len(wanted) == 0 { - return - } - - // Record latency - s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) - - // Inform the SessionInterestManager that this session is no longer - // expecting to receive the wanted keys - s.sim.RemoveSessionWants(s.id, wanted) - - s.idleTick.Stop() - - // We've received new wanted blocks, so reset the number of ticks - // that have occurred since the last new block - s.consecutiveTicks = 0 - - s.resetIdleTick() -} - -// wantBlocks is called when blocks are requested by the client -func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { - if len(newks) > 0 { - // Inform the SessionInterestManager that this session is interested in the keys - s.sim.RecordSessionInterest(s.id, newks) - // Tell the sessionWants tracker that that the wants have been requested - s.sw.BlocksRequested(newks) - // Tell the sessionWantSender that the blocks have been requested - 
s.sws.Add(newks) - } - - // If we have discovered peers already, the sessionWantSender will - // send wants to them - if s.sprm.PeersDiscovered() { - return - } - - // No peers discovered yet, broadcast some want-haves - ks := s.sw.GetNextWants() - if len(ks) > 0 { - log.Infow("No peers - broadcasting", "session", s.id, "want-count", len(ks)) - s.broadcastWantHaves(ctx, ks) - } -} - -// Send want-haves to all connected peers -func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { - log.Debugw("broadcastWantHaves", "session", s.id, "cids", wants) - s.pm.BroadcastWantHaves(ctx, wants) -} - -// The session will broadcast if it has outstanding wants and doesn't receive -// any blocks for some time. -// The length of time is calculated -// - initially -// as a fixed delay -// - once some blocks are received -// from a base delay and average latency, with a backoff -func (s *Session) resetIdleTick() { - var tickDelay time.Duration - if !s.latencyTrkr.hasLatency() { - tickDelay = s.initialSearchDelay - } else { - avLat := s.latencyTrkr.averageLatency() - tickDelay = s.baseTickDelay + (3 * avLat) - } - tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) - s.idleTick.Reset(tickDelay) -} - -// latencyTracker keeps track of the average latency between sending a want -// and receiving the corresponding block -type latencyTracker struct { - totalLatency time.Duration - count int -} - -func (lt *latencyTracker) hasLatency() bool { - return lt.totalLatency > 0 && lt.count > 0 -} - -func (lt *latencyTracker) averageLatency() time.Duration { - return lt.totalLatency / time.Duration(lt.count) -} - -func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { - lt.totalLatency += totalLatency - lt.count += count -} diff --git a/client/internal/session/session_test.go b/client/internal/session/session_test.go deleted file mode 100644 index e7ab8737..00000000 --- a/client/internal/session/session_test.go +++ /dev/null @@ -1,592 +0,0 @@ -package session - -import ( - "context" - "sync" - "testing" - "time" - - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - delay "github.com/ipfs/go-ipfs-delay" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -type mockSessionMgr struct { - lk sync.Mutex - removeSession bool - cancels []cid.Cid -} - -func newMockSessionMgr() *mockSessionMgr { - return &mockSessionMgr{} -} - -func (msm *mockSessionMgr) removeSessionCalled() bool { - msm.lk.Lock() - defer msm.lk.Unlock() - return msm.removeSession -} - -func (msm *mockSessionMgr) cancelled() []cid.Cid { - msm.lk.Lock() - defer msm.lk.Unlock() - return msm.cancels -} - -func (msm *mockSessionMgr) RemoveSession(sesid uint64) { - msm.lk.Lock() - defer msm.lk.Unlock() - msm.removeSession = true -} - -func (msm *mockSessionMgr) CancelSessionWants(sid uint64, wants []cid.Cid) { - msm.lk.Lock() - defer msm.lk.Unlock() - msm.cancels = append(msm.cancels, wants...) 
-} - -func newFakeSessionPeerManager() *bsspm.SessionPeerManager { - return bsspm.New(1, newFakePeerTagger()) -} - -func newFakePeerTagger() *fakePeerTagger { - return &fakePeerTagger{ - protectedPeers: make(map[peer.ID]map[string]struct{}), - } -} - -type fakePeerTagger struct { - lk sync.Mutex - protectedPeers map[peer.ID]map[string]struct{} -} - -func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, val int) {} -func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) {} - -func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { - fpt.lk.Lock() - defer fpt.lk.Unlock() - - tags, ok := fpt.protectedPeers[p] - if !ok { - tags = make(map[string]struct{}) - fpt.protectedPeers[p] = tags - } - tags[tag] = struct{}{} -} - -func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { - fpt.lk.Lock() - defer fpt.lk.Unlock() - - if tags, ok := fpt.protectedPeers[p]; ok { - delete(tags, tag) - return len(tags) > 0 - } - - return false -} - -func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { - fpt.lk.Lock() - defer fpt.lk.Unlock() - - return len(fpt.protectedPeers[p]) > 0 -} - -type fakeProviderFinder struct { - findMorePeersRequested chan cid.Cid -} - -func newFakeProviderFinder() *fakeProviderFinder { - return &fakeProviderFinder{ - findMorePeersRequested: make(chan cid.Cid, 1), - } -} - -func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID { - go func() { - select { - case fpf.findMorePeersRequested <- k: - case <-ctx.Done(): - } - }() - - return make(chan peer.ID) -} - -type wantReq struct { - cids []cid.Cid -} - -type fakePeerManager struct { - wantReqs chan wantReq -} - -func newFakePeerManager() *fakePeerManager { - return &fakePeerManager{ - wantReqs: make(chan wantReq, 1), - } -} - -func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} -func (pm *fakePeerManager) UnregisterSession(uint64) {} -func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} -func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Cid) { - select { - case pm.wantReqs <- wantReq{cids}: - case <-ctx.Done(): - } -} -func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) {} - -func TestSessionGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") - blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) - var cids []cid.Cid - for _, block := range blks { - cids = append(cids, block.Cid()) - } - - _, err := session.GetBlocks(ctx, cids) - - if err != nil { - t.Fatal("error getting blocks") - } - - // Wait for initial want request - receivedWantReq := <-fpm.wantReqs - - // Should have registered session's interest in blocks - intSes := sim.FilterSessionInterested(id, cids) - if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { - t.Fatal("did not register session interest in blocks") - } - - // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not enqueue correct initial number of wants") - } - - // Simulate receiving HAVEs from 
several peers - peers := testutil.GeneratePeers(5) - for i, p := range peers { - blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] - session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) - } - - time.Sleep(10 * time.Millisecond) - - // Verify new peers were recorded - if !testutil.MatchPeersIgnoreOrder(fspm.Peers(), peers) { - t.Fatal("peers not recorded by the peer manager") - } - - // Verify session still wants received blocks - _, unwanted := sim.SplitWantedUnwanted(blks) - if len(unwanted) > 0 { - t.Fatal("all blocks should still be wanted") - } - - // Simulate receiving DONT_HAVE for a CID - session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) - - time.Sleep(10 * time.Millisecond) - - // Verify session still wants received blocks - _, unwanted = sim.SplitWantedUnwanted(blks) - if len(unwanted) > 0 { - t.Fatal("all blocks should still be wanted") - } - - // Simulate receiving block for a CID - session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) - - time.Sleep(10 * time.Millisecond) - - // Verify session no longer wants received block - wanted, unwanted := sim.SplitWantedUnwanted(blks) - if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { - t.Fatal("session wants block that has already been received") - } - if len(wanted) != len(blks)-1 { - t.Fatal("session wants incorrect number of blocks") - } - - // Shut down session - cancel() - - time.Sleep(10 * time.Millisecond) - - // Verify session was removed - if !sm.removeSessionCalled() { - t.Fatal("expected session to be removed") - } -} - -func TestSessionFindMorePeers(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) - defer cancel() - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") - session.SetBaseTickDelay(200 * time.Microsecond) - blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) - var cids []cid.Cid - for _, block := range blks { - cids = append(cids, block.Cid()) - } - _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } - - // The session should initially broadcast want-haves - select { - case <-fpm.wantReqs: - case <-ctx.Done(): - t.Fatal("Did not make first want request ") - } - - // receive a block to trigger a tick reset - time.Sleep(20 * time.Millisecond) // need to make sure some latency registers - // or there will be no tick set -- time precision on Windows in go is in the - // millisecond range - p := testutil.GeneratePeers(1)[0] - - blk := blks[0] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, []cid.Cid{}, []cid.Cid{}) - - // The session should now time out waiting for a response and broadcast - // want-haves again - select { - case <-fpm.wantReqs: - case <-ctx.Done(): - t.Fatal("Did not make second want request ") - } - - // The session should keep broadcasting periodically until it receives a response - select { - case receivedWantReq := <-fpm.wantReqs: - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not rebroadcast whole live list") - } - // Make sure the first block is not included because it has already - // been received - for 
_, c := range receivedWantReq.cids { - if c.Equals(cids[0]) { - t.Fatal("should not braodcast block that was already received") - } - } - case <-ctx.Done(): - t.Fatal("Never rebroadcast want list") - } - - // The session should eventually try to find more peers - select { - case <-fpf.findMorePeersRequested: - case <-ctx.Done(): - t.Fatal("Did not find more peers") - } -} - -func TestSessionOnPeersExhausted(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") - blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5) - var cids []cid.Cid - for _, block := range blks { - cids = append(cids, block.Cid()) - } - _, err := session.GetBlocks(ctx, cids) - - if err != nil { - t.Fatal("error getting blocks") - } - - // Wait for initial want request - receivedWantReq := <-fpm.wantReqs - - // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not enqueue correct initial number of wants") - } - - // Signal that all peers have send DONT_HAVE for two of the wants - session.onPeersExhausted(cids[len(cids)-2:]) - - // Wait for want request - receivedWantReq = <-fpm.wantReqs - - // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != 2 { - t.Fatal("did not enqueue correct initial number of wants") - } -} - -func TestSessionFailingToGetFirstBlock(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") - blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(4) - var cids []cid.Cid - for _, block := range blks { - cids = append(cids, block.Cid()) - } - startTick := time.Now() - _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } - - // The session should initially broadcast want-haves - select { - case <-fpm.wantReqs: - case <-ctx.Done(): - t.Fatal("Did not make first want request ") - } - - // Verify a broadcast was made - select { - case receivedWantReq := <-fpm.wantReqs: - if len(receivedWantReq.cids) < len(cids) { - t.Fatal("did not rebroadcast whole live list") - } - case <-ctx.Done(): - t.Fatal("Never rebroadcast want list") - } - - // Wait for a request to find more peers to occur - select { - case k := <-fpf.findMorePeersRequested: - if testutil.IndexOf(blks, k) == -1 { - t.Fatal("did not rebroadcast an active want") - } - case <-ctx.Done(): - t.Fatal("Did not find more peers") - } - firstTickLength := time.Since(startTick) - - // Wait for another broadcast to occur - select { - case receivedWantReq := <-fpm.wantReqs: - if len(receivedWantReq.cids) < len(cids) { - t.Fatal("did not rebroadcast whole live list") - } - case 
<-ctx.Done(): - t.Fatal("Never rebroadcast want list") - } - - // Wait for another broadcast to occur - startTick = time.Now() - select { - case receivedWantReq := <-fpm.wantReqs: - if len(receivedWantReq.cids) < len(cids) { - t.Fatal("did not rebroadcast whole live list") - } - case <-ctx.Done(): - t.Fatal("Never rebroadcast want list") - } - - // Tick should take longer - consecutiveTickLength := time.Since(startTick) - if firstTickLength > consecutiveTickLength { - t.Fatal("Should have increased tick length after first consecutive tick") - } - - // Wait for another broadcast to occur - startTick = time.Now() - select { - case receivedWantReq := <-fpm.wantReqs: - if len(receivedWantReq.cids) < len(cids) { - t.Fatal("did not rebroadcast whole live list") - } - case <-ctx.Done(): - t.Fatal("Never rebroadcast want list") - } - - // Tick should take longer - secondConsecutiveTickLength := time.Since(startTick) - if consecutiveTickLength > secondConsecutiveTickLength { - t.Fatal("Should have increased tick length after first consecutive tick") - } - - // Should not have tried to find peers on consecutive ticks - select { - case <-fpf.findMorePeersRequested: - t.Fatal("Should not have tried to find peers on consecutive ticks") - default: - } - - // Wait for rebroadcast to occur - select { - case k := <-fpf.findMorePeersRequested: - if testutil.IndexOf(blks, k) == -1 { - t.Fatal("did not rebroadcast an active want") - } - case <-ctx.Done(): - t.Fatal("Did not rebroadcast to find more peers") - } -} - -func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - - // Create a new session with its own context - sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(sessctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") - - timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer timerCancel() - - // Request a block with a new context - blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(1) - getctx, getcancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer getcancel() - - getBlocksCh, err := session.GetBlocks(getctx, []cid.Cid{blks[0].Cid()}) - if err != nil { - t.Fatal("error getting blocks") - } - - // Cancel the session context - sesscancel() - - // Expect the GetBlocks() channel to be closed - select { - case _, ok := <-getBlocksCh: - if ok { - t.Fatal("expected channel to be closed but was not closed") - } - case <-timerCtx.Done(): - t.Fatal("expected channel to be closed before timeout") - } - - time.Sleep(10 * time.Millisecond) - - // Expect RemoveSession to be called - if !sm.removeSessionCalled() { - t.Fatal("expected onShutdown to be called") - } -} - -func TestSessionOnShutdownCalled(t *testing.T) { - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - - // Create a new session with its own context - sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer sesscancel() - session := New(sessctx, sm, 
id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") - - // Shutdown the session - session.Shutdown() - - time.Sleep(10 * time.Millisecond) - - // Expect RemoveSession to be called - if !sm.removeSessionCalled() { - t.Fatal("expected onShutdown to be called") - } -} - -func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { - ctx, cancelCtx := context.WithTimeout(context.Background(), 20*time.Millisecond) - fpm := newFakePeerManager() - fspm := newFakeSessionPeerManager() - fpf := newFakeProviderFinder() - - sim := bssim.New() - bpm := bsbpm.New() - notif := notifications.New() - defer notif.Shutdown() - id := testutil.GenerateSessionID() - sm := newMockSessionMgr() - session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") - blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(2) - cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} - - _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } - - // Wait for initial want request - <-fpm.wantReqs - - // Shut down session - cancelCtx() - - // Simulate receiving block for a CID - peer := testutil.GeneratePeers(1)[0] - session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) - - time.Sleep(5 * time.Millisecond) - - // If we don't get a panic then the test is considered passing -} diff --git a/client/internal/session/sessionwants.go b/client/internal/session/sessionwants.go deleted file mode 100644 index 0d4ded01..00000000 --- a/client/internal/session/sessionwants.go +++ /dev/null @@ -1,193 +0,0 @@ -package session - -import ( - "fmt" - "math/rand" - "time" - - cid "github.com/ipfs/go-cid" -) - -// liveWantsOrder and liveWants will get out of sync as blocks are received. -// This constant is the maximum amount to allow them to be out of sync before -// cleaning up the ordering array. -const liveWantsOrderGCLimit = 32 - -// sessionWants keeps track of which cids are waiting to be sent out, and which -// peers are "live" - ie, we've sent a request but haven't received a block yet -type sessionWants struct { - // The wants that have not yet been sent out - toFetch *cidQueue - // Wants that have been sent but have not received a response - liveWants map[cid.Cid]time.Time - // The order in which wants were requested - liveWantsOrder []cid.Cid - // The maximum number of want-haves to send in a broadcast - broadcastLimit int -} - -func newSessionWants(broadcastLimit int) sessionWants { - return sessionWants{ - toFetch: newCidQueue(), - liveWants: make(map[cid.Cid]time.Time), - broadcastLimit: broadcastLimit, - } -} - -func (sw *sessionWants) String() string { - return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) -} - -// BlocksRequested is called when the client makes a request for blocks -func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { - for _, k := range newWants { - sw.toFetch.Push(k) - } -} - -// GetNextWants is called when the session has not yet discovered peers with -// the blocks that it wants. It moves as many CIDs from the fetch queue to -// the live wants queue as possible (given the broadcast limit). -// Returns the newly live wants. 
-func (sw *sessionWants) GetNextWants() []cid.Cid { - now := time.Now() - - // Move CIDs from fetch queue to the live wants queue (up to the broadcast - // limit) - currentLiveCount := len(sw.liveWants) - toAdd := sw.broadcastLimit - currentLiveCount - - var live []cid.Cid - for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { - c := sw.toFetch.Pop() - live = append(live, c) - sw.liveWantsOrder = append(sw.liveWantsOrder, c) - sw.liveWants[c] = now - } - - return live -} - -// WantsSent is called when wants are sent to a peer -func (sw *sessionWants) WantsSent(ks []cid.Cid) { - now := time.Now() - for _, c := range ks { - if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { - sw.toFetch.Remove(c) - sw.liveWantsOrder = append(sw.liveWantsOrder, c) - sw.liveWants[c] = now - } - } -} - -// BlocksReceived removes received block CIDs from the live wants list and -// measures latency. It returns the CIDs of blocks that were actually -// wanted (as opposed to duplicates) and the total latency for all incoming blocks. -func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { - wanted := make([]cid.Cid, 0, len(ks)) - totalLatency := time.Duration(0) - if len(ks) == 0 { - return wanted, totalLatency - } - - // Filter for blocks that were actually wanted (as opposed to duplicates) - now := time.Now() - for _, c := range ks { - if sw.isWanted(c) { - wanted = append(wanted, c) - - // Measure latency - sentAt, ok := sw.liveWants[c] - if ok && !sentAt.IsZero() { - totalLatency += now.Sub(sentAt) - } - - // Remove the CID from the live wants / toFetch queue - delete(sw.liveWants, c) - sw.toFetch.Remove(c) - } - } - - // If the live wants ordering array is a long way out of sync with the - // live wants map, clean up the ordering array - if len(sw.liveWantsOrder)-len(sw.liveWants) > liveWantsOrderGCLimit { - cleaned := sw.liveWantsOrder[:0] - for _, c := range sw.liveWantsOrder { - if _, ok := sw.liveWants[c]; ok { - cleaned = append(cleaned, c) - } - } - sw.liveWantsOrder = cleaned - } - - return wanted, totalLatency -} - -// PrepareBroadcast saves the current time for each live want and returns the -// live want CIDs up to the broadcast limit. -func (sw *sessionWants) PrepareBroadcast() []cid.Cid { - now := time.Now() - live := make([]cid.Cid, 0, len(sw.liveWants)) - for _, c := range sw.liveWantsOrder { - if _, ok := sw.liveWants[c]; ok { - // No response was received for the want, so reset the sent time - // to now as we're about to broadcast - sw.liveWants[c] = now - - live = append(live, c) - if len(live) == sw.broadcastLimit { - break - } - } - } - - return live -} - -// CancelPending removes the given CIDs from the fetch queue. 
-func (sw *sessionWants) CancelPending(keys []cid.Cid) { - for _, k := range keys { - sw.toFetch.Remove(k) - } -} - -// LiveWants returns a list of live wants -func (sw *sessionWants) LiveWants() []cid.Cid { - live := make([]cid.Cid, 0, len(sw.liveWants)) - for c := range sw.liveWants { - live = append(live, c) - } - - return live -} - -// RandomLiveWant returns a randomly selected live want -func (sw *sessionWants) RandomLiveWant() cid.Cid { - if len(sw.liveWants) == 0 { - return cid.Cid{} - } - - // picking a random live want - i := rand.Intn(len(sw.liveWants)) - for k := range sw.liveWants { - if i == 0 { - return k - } - i-- - } - return cid.Cid{} -} - -// Has live wants indicates if there are any live wants -func (sw *sessionWants) HasLiveWants() bool { - return len(sw.liveWants) > 0 -} - -// Indicates whether the want is in either of the fetch or live queues -func (sw *sessionWants) isWanted(c cid.Cid) bool { - _, ok := sw.liveWants[c] - if !ok { - ok = sw.toFetch.Has(c) - } - return ok -} diff --git a/client/internal/session/sessionwants_test.go b/client/internal/session/sessionwants_test.go deleted file mode 100644 index b6e6c94f..00000000 --- a/client/internal/session/sessionwants_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package session - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" -) - -func TestEmptySessionWants(t *testing.T) { - sw := newSessionWants(broadcastLiveWantsLimit) - - // Expect these functions to return nothing on a new sessionWants - lws := sw.PrepareBroadcast() - if len(lws) > 0 { - t.Fatal("expected no broadcast wants") - } - lws = sw.LiveWants() - if len(lws) > 0 { - t.Fatal("expected no live wants") - } - if sw.HasLiveWants() { - t.Fatal("expected not to have live wants") - } - rw := sw.RandomLiveWant() - if rw.Defined() { - t.Fatal("expected no random want") - } -} - -func TestSessionWants(t *testing.T) { - sw := newSessionWants(5) - cids := testutil.GenerateCids(10) - others := testutil.GenerateCids(1) - - // Add 10 new wants - // toFetch Live - // 9876543210 - sw.BlocksRequested(cids) - - // Get next wants with a limit of 5 - // The first 5 cids should go move into the live queue - // toFetch Live - // 98765 43210 - nextw := sw.GetNextWants() - if len(nextw) != 5 { - t.Fatal("expected 5 next wants") - } - lws := sw.PrepareBroadcast() - if len(lws) != 5 { - t.Fatal("expected 5 broadcast wants", len(lws)) - } - lws = sw.LiveWants() - if len(lws) != 5 { - t.Fatal("expected 5 live wants") - } - if !sw.HasLiveWants() { - t.Fatal("expected to have live wants") - } - rw := sw.RandomLiveWant() - if !rw.Defined() { - t.Fatal("expected random want") - } - - // Two wanted blocks and one other block are received. - // The wanted blocks should be removed from the live wants queue - // (the other block CID should be ignored) - // toFetch Live - // 98765 432__ - recvdCids := []cid.Cid{cids[0], cids[1], others[0]} - sw.BlocksReceived(recvdCids) - lws = sw.LiveWants() - if len(lws) != 3 { - t.Fatal("expected 3 live wants") - } - - // Ask for next wants with a limit of 5 - // Should move 2 wants from toFetch queue to live wants - // toFetch Live - // 987__ 65432 - nextw = sw.GetNextWants() - if len(nextw) != 2 { - t.Fatal("expected 2 next wants") - } - lws = sw.LiveWants() - if len(lws) != 5 { - t.Fatal("expected 5 live wants") - } - - // One wanted block and one dup block are received. - // The wanted block should be removed from the live - // wants queue. 
- // toFetch Live - // 987 654_2 - recvdCids = []cid.Cid{cids[0], cids[3]} - sw.BlocksReceived(recvdCids) - lws = sw.LiveWants() - if len(lws) != 4 { - t.Fatal("expected 4 live wants") - } - - // One block in the toFetch queue should be cancelled - // toFetch Live - // 9_7 654_2 - sw.CancelPending([]cid.Cid{cids[8]}) - lws = sw.LiveWants() - if len(lws) != 4 { - t.Fatal("expected 4 live wants") - } -} - -func TestPrepareBroadcast(t *testing.T) { - sw := newSessionWants(3) - cids := testutil.GenerateCids(10) - - // Add 6 new wants - // toFetch Live - // 543210 - sw.BlocksRequested(cids[:6]) - - // Get next wants with a limit of 3 - // The first 3 cids should go move into the live queue - // toFetch Live - // 543 210 - sw.GetNextWants() - - // Broadcast should contain wants in order - for i := 0; i < 10; i++ { - ws := sw.PrepareBroadcast() - if len(ws) != 3 { - t.Fatal("should broadcast all live wants") - } - for idx, c := range ws { - if !c.Equals(cids[idx]) { - t.Fatal("broadcast should always return wants in order") - } - } - } - - // One block received - // Remove a cid from the live queue - sw.BlocksReceived(cids[:1]) - // toFetch Live - // 543 21_ - - // Add 4 new wants - // toFetch Live - // 9876543 21 - sw.BlocksRequested(cids[6:]) - - // 2 Wants sent - // toFetch Live - // 98765 4321 - sw.WantsSent(cids[3:5]) - - // Broadcast should contain wants in order - cids = cids[1:] - for i := 0; i < 10; i++ { - ws := sw.PrepareBroadcast() - if len(ws) != 3 { - t.Fatal("should broadcast live wants up to limit", len(ws), len(cids)) - } - for idx, c := range ws { - if !c.Equals(cids[idx]) { - t.Fatal("broadcast should always return wants in order") - } - } - } -} - -// Test that even after GC broadcast returns correct wants -func TestPrepareBroadcastAfterGC(t *testing.T) { - sw := newSessionWants(5) - cids := testutil.GenerateCids(liveWantsOrderGCLimit * 2) - - sw.BlocksRequested(cids) - - // Trigger a sessionWants internal GC of the live wants - sw.BlocksReceived(cids[:liveWantsOrderGCLimit+1]) - cids = cids[:liveWantsOrderGCLimit+1] - - // Broadcast should contain wants in order - ws := sw.PrepareBroadcast() - for i, c := range ws { - if !c.Equals(cids[i]) { - t.Fatal("broadcast should always return wants in order") - } - } -} diff --git a/client/internal/session/sessionwantsender.go b/client/internal/session/sessionwantsender.go deleted file mode 100644 index 9286d90e..00000000 --- a/client/internal/session/sessionwantsender.go +++ /dev/null @@ -1,766 +0,0 @@ -package session - -import ( - "context" - - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -const ( - // Maximum number of changes to accept before blocking - changesBufferSize = 128 - // If the session receives this many DONT_HAVEs in a row from a peer, - // it prunes the peer from the session - peerDontHaveLimit = 16 -) - -// BlockPresence indicates whether a peer has a block. -// Note that the order is important, we decide which peer to send a want to -// based on knowing whether peer has the block. 
e.g. we're more likely to send -// a want to a peer that has the block than a peer that doesn't have the block -// so BPHave > BPDontHave -type BlockPresence int - -const ( - BPDontHave BlockPresence = iota - BPUnknown - BPHave -) - -// SessionWantsCanceller provides a method to cancel wants -type SessionWantsCanceller interface { - // Cancel wants for this session - CancelSessionWants(sid uint64, wants []cid.Cid) -} - -// update encapsulates a message received by the session -type update struct { - // Which peer sent the update - from peer.ID - // cids of blocks received - ks []cid.Cid - // HAVE message - haves []cid.Cid - // DONT_HAVE message - dontHaves []cid.Cid -} - -// peerAvailability indicates a peer's connection state -type peerAvailability struct { - target peer.ID - available bool -} - -// change can be new wants, a new message received by the session, -// or a change in the connect status of a peer -type change struct { - // new wants requested - add []cid.Cid - // wants cancelled - cancel []cid.Cid - // new message received by session (blocks / HAVEs / DONT_HAVEs) - update update - // peer has connected / disconnected - availability peerAvailability -} - -type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) -type onPeersExhaustedFn func([]cid.Cid) - -// sessionWantSender is responsible for sending want-have and want-block to -// peers. For each want, it sends a single optimistic want-block request to -// one peer and want-have requests to all other peers in the session. -// To choose the best peer for the optimistic want-block it maintains a list -// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and -// consults the peer response tracker (records which peers sent us blocks). -type sessionWantSender struct { - // The context is used when sending wants - ctx context.Context - // Called to shut down the sessionWantSender - shutdown func() - // The sessionWantSender uses the closed channel to signal when it's - // finished shutting down - closed chan struct{} - // The session ID - sessionID uint64 - // A channel that collects incoming changes (events) - changes chan change - // Information about each want indexed by CID - wants map[cid.Cid]*wantInfo - // Keeps track of how many consecutive DONT_HAVEs a peer has sent - peerConsecutiveDontHaves map[peer.ID]int - // Tracks which peers we have sent want-block to - swbt *sentWantBlocksTracker - // Tracks the number of blocks each peer sent us - peerRspTrkr *peerResponseTracker - // Sends wants to peers - pm PeerManager - // Keeps track of peers in the session - spm SessionPeerManager - // Cancels wants - canceller SessionWantsCanceller - // Keeps track of which peer has / doesn't have a block - bpm *bsbpm.BlockPresenceManager - // Called when wants are sent - onSend onSendFn - // Called when all peers explicitly don't have a block - onPeersExhausted onPeersExhaustedFn -} - -func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller, - bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { - - ctx, cancel := context.WithCancel(context.Background()) - sws := sessionWantSender{ - ctx: ctx, - shutdown: cancel, - closed: make(chan struct{}), - sessionID: sid, - changes: make(chan change, changesBufferSize), - wants: make(map[cid.Cid]*wantInfo), - peerConsecutiveDontHaves: make(map[peer.ID]int), - swbt: newSentWantBlocksTracker(), - peerRspTrkr: newPeerResponseTracker(), - - pm: pm, - spm: spm, -
canceller: canceller, - bpm: bpm, - onSend: onSend, - onPeersExhausted: onPeersExhausted, - } - - return sws -} - -func (sws *sessionWantSender) ID() uint64 { - return sws.sessionID -} - -// Add is called when new wants are added to the session -func (sws *sessionWantSender) Add(ks []cid.Cid) { - if len(ks) == 0 { - return - } - sws.addChange(change{add: ks}) -} - -// Cancel is called when a request is cancelled -func (sws *sessionWantSender) Cancel(ks []cid.Cid) { - if len(ks) == 0 { - return - } - sws.addChange(change{cancel: ks}) -} - -// Update is called when the session receives a message with incoming blocks -// or HAVE / DONT_HAVE -func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { - hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 - if !hasUpdate { - return - } - - sws.addChange(change{ - update: update{from, ks, haves, dontHaves}, - }) -} - -// SignalAvailability is called by the PeerManager to signal that a peer has -// connected / disconnected -func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { - availability := peerAvailability{p, isAvailable} - // Add the change in a non-blocking manner to avoid the possibility of a - // deadlock - sws.addChangeNonBlocking(change{availability: availability}) -} - -// Run is the main loop for processing incoming changes -func (sws *sessionWantSender) Run() { - for { - select { - case ch := <-sws.changes: - sws.onChange([]change{ch}) - case <-sws.ctx.Done(): - // Unregister the session with the PeerManager - sws.pm.UnregisterSession(sws.sessionID) - - // Close the 'closed' channel to signal to Shutdown() that the run - // loop has exited - close(sws.closed) - return - } - } -} - -// Shutdown the sessionWantSender -func (sws *sessionWantSender) Shutdown() { - // Signal to the run loop to stop processing - sws.shutdown() - // Wait for run loop to complete - <-sws.closed -} - -// addChange adds a new change to the queue -func (sws *sessionWantSender) addChange(c change) { - select { - case sws.changes <- c: - case <-sws.ctx.Done(): - } -} - -// addChangeNonBlocking adds a new change to the queue, using a go-routine -// if the change blocks, so as to avoid potential deadlocks -func (sws *sessionWantSender) addChangeNonBlocking(c change) { - select { - case sws.changes <- c: - default: - // changes channel is full, so add change in a go routine instead - go func() { - select { - case sws.changes <- c: - case <-sws.ctx.Done(): - } - }() - } -} - -// collectChanges collects all the changes that have occurred since the last -// invocation of onChange -func (sws *sessionWantSender) collectChanges(changes []change) []change { - for len(changes) < changesBufferSize { - select { - case next := <-sws.changes: - changes = append(changes, next) - default: - return changes - } - } - return changes -} - -// onChange processes the next set of changes -func (sws *sessionWantSender) onChange(changes []change) { - // Several changes may have been recorded since the last time we checked, - // so pop all outstanding changes from the channel - changes = sws.collectChanges(changes) - - // Apply each change - availability := make(map[peer.ID]bool, len(changes)) - cancels := make([]cid.Cid, 0) - var updates []update - for _, chng := range changes { - // Initialize info for new wants - for _, c := range chng.add { - sws.trackWant(c) - } - - // Remove cancelled wants - for _, c := range chng.cancel { - sws.untrackWant(c) - cancels = append(cancels, c) - } - - // Consolidate 
updates and changes to availability - if chng.update.from != "" { - // If the update includes blocks or haves, treat it as signaling that - // the peer is available - if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { - p := chng.update.from - availability[p] = true - - // Register with the PeerManager - sws.pm.RegisterSession(p, sws) - } - - updates = append(updates, chng.update) - } - if chng.availability.target != "" { - availability[chng.availability.target] = chng.availability.available - } - } - - // Update peer availability - newlyAvailable, newlyUnavailable := sws.processAvailability(availability) - - // Update wants - dontHaves := sws.processUpdates(updates) - - // Check if there are any wants for which all peers have indicated they - // don't have the want - sws.checkForExhaustedWants(dontHaves, newlyUnavailable) - - // If there are any cancels, send them - if len(cancels) > 0 { - sws.canceller.CancelSessionWants(sws.sessionID, cancels) - } - - // If there are some connected peers, send any pending wants - if sws.spm.HasPeers() { - sws.sendNextWants(newlyAvailable) - } -} - -// processAvailability updates the want queue with any changes in -// peer availability -// It returns the peers that have become -// - newly available -// - newly unavailable -func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { - var newlyAvailable []peer.ID - var newlyUnavailable []peer.ID - for p, isNowAvailable := range availability { - stateChange := false - if isNowAvailable { - isNewPeer := sws.spm.AddPeer(p) - if isNewPeer { - stateChange = true - newlyAvailable = append(newlyAvailable, p) - } - } else { - wasAvailable := sws.spm.RemovePeer(p) - if wasAvailable { - stateChange = true - newlyUnavailable = append(newlyUnavailable, p) - } - } - - // If the state has changed - if stateChange { - sws.updateWantsPeerAvailability(p, isNowAvailable) - // Reset the count of consecutive DONT_HAVEs received from the - // peer - delete(sws.peerConsecutiveDontHaves, p) - } - } - - return newlyAvailable, newlyUnavailable -} - -// trackWant creates a new entry in the map of CID -> want info -func (sws *sessionWantSender) trackWant(c cid.Cid) { - if _, ok := sws.wants[c]; ok { - return - } - - // Create the want info - wi := newWantInfo(sws.peerRspTrkr) - sws.wants[c] = wi - - // For each available peer, register any information we know about - // whether the peer has the block - for _, p := range sws.spm.Peers() { - sws.updateWantBlockPresence(c, p) - } -} - -// untrackWant removes an entry from the map of CID -> want info -func (sws *sessionWantSender) untrackWant(c cid.Cid) { - delete(sws.wants, c) -} - -// processUpdates processes incoming blocks and HAVE / DONT_HAVEs. -// It returns all DONT_HAVEs. 
-func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { - // Process received blocks keys - blkCids := cid.NewSet() - for _, upd := range updates { - for _, c := range upd.ks { - blkCids.Add(c) - - // Remove the want - removed := sws.removeWant(c) - if removed != nil { - // Inform the peer tracker that this peer was the first to send - // us the block - sws.peerRspTrkr.receivedBlockFrom(upd.from) - - // Protect the connection to this peer so that we can ensure - // that the connection doesn't get pruned by the connection - // manager - sws.spm.ProtectConnection(upd.from) - } - delete(sws.peerConsecutiveDontHaves, upd.from) - } - } - - // Process received DONT_HAVEs - dontHaves := cid.NewSet() - prunePeers := make(map[peer.ID]struct{}) - for _, upd := range updates { - for _, c := range upd.dontHaves { - // Track the number of consecutive DONT_HAVEs each peer receives - if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { - prunePeers[upd.from] = struct{}{} - } else { - sws.peerConsecutiveDontHaves[upd.from]++ - } - - // If we already received a block for the want, there's no need to - // update block presence etc - if blkCids.Has(c) { - continue - } - - dontHaves.Add(c) - - // Update the block presence for the peer - sws.updateWantBlockPresence(c, upd.from) - - // Check if the DONT_HAVE is in response to a want-block - // (could also be in response to want-have) - if sws.swbt.haveSentWantBlockTo(upd.from, c) { - // If we were waiting for a response from this peer, clear - // sentTo so that we can send the want to another peer - if sentTo, ok := sws.getWantSentTo(c); ok && sentTo == upd.from { - sws.setWantSentTo(c, "") - } - } - } - } - - // Process received HAVEs - for _, upd := range updates { - for _, c := range upd.haves { - // If we haven't already received a block for the want - if !blkCids.Has(c) { - // Update the block presence for the peer - sws.updateWantBlockPresence(c, upd.from) - } - - // Clear the consecutive DONT_HAVE count for the peer - delete(sws.peerConsecutiveDontHaves, upd.from) - delete(prunePeers, upd.from) - } - } - - // If any peers have sent us too many consecutive DONT_HAVEs, remove them - // from the session - for p := range prunePeers { - // Before removing the peer from the session, check if the peer - // sent us a HAVE for a block that we want - for c := range sws.wants { - if sws.bpm.PeerHasBlock(p, c) { - delete(prunePeers, p) - break - } - } - } - if len(prunePeers) > 0 { - go func() { - for p := range prunePeers { - // Peer doesn't have anything we want, so remove it - log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) - sws.SignalAvailability(p, false) - } - }() - } - - return dontHaves.Keys() -} - -// checkForExhaustedWants checks if there are any wants for which all peers -// have sent a DONT_HAVE. We call these "exhausted" wants. 
-func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { - // If there are no new DONT_HAVEs, and no peers became unavailable, then - // we don't need to check for exhausted wants - if len(dontHaves) == 0 && len(newlyUnavailable) == 0 { - return - } - - // We need to check each want for which we just received a DONT_HAVE - wants := dontHaves - - // If a peer just became unavailable, then we need to check all wants - // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) - if len(newlyUnavailable) > 0 { - // Collect all pending wants - wants = make([]cid.Cid, 0, len(sws.wants)) - for c := range sws.wants { - wants = append(wants, c) - } - - // If the last available peer in the session has become unavailable - // then we need to broadcast all pending wants - if !sws.spm.HasPeers() { - sws.processExhaustedWants(wants) - return - } - } - - // If all available peers for a cid sent a DONT_HAVE, signal to the session - // that we've exhausted available peers - if len(wants) > 0 { - exhausted := sws.bpm.AllPeersDoNotHaveBlock(sws.spm.Peers(), wants) - sws.processExhaustedWants(exhausted) - } -} - -// processExhaustedWants filters the list so that only those wants that haven't -// already been marked as exhausted are passed to onPeersExhausted() -func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { - newlyExhausted := sws.newlyExhausted(exhausted) - if len(newlyExhausted) > 0 { - sws.onPeersExhausted(newlyExhausted) - } -} - -// convenience structs for passing around want-blocks and want-haves for a peer -type wantSets struct { - wantBlocks *cid.Set - wantHaves *cid.Set -} - -type allWants map[peer.ID]*wantSets - -func (aw allWants) forPeer(p peer.ID) *wantSets { - if _, ok := aw[p]; !ok { - aw[p] = &wantSets{ - wantBlocks: cid.NewSet(), - wantHaves: cid.NewSet(), - } - } - return aw[p] -} - -// sendNextWants sends wants to peers according to the latest information -// about which peers have / don't have blocks -func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { - toSend := make(allWants) - - for c, wi := range sws.wants { - // Ensure we send want-haves to any newly available peers - for _, p := range newlyAvailable { - toSend.forPeer(p).wantHaves.Add(c) - } - - // We already sent a want-block to a peer and haven't yet received a - // response - if wi.sentTo != "" { - continue - } - - // All the peers have indicated that they don't have the block - // corresponding to this want, so we must wait to discover more peers - if wi.bestPeer == "" { - // TODO: work this out in real time instead of using bestP? - continue - } - - // Record that we are sending a want-block for this want to the peer - sws.setWantSentTo(c, wi.bestPeer) - - // Send a want-block to the chosen peer - toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) - - // Send a want-have to each other peer - for _, op := range sws.spm.Peers() { - if op != wi.bestPeer { - toSend.forPeer(op).wantHaves.Add(c) - } - } - } - - // Send any wants we've collected - sws.sendWants(toSend) -} - -// sendWants sends want-haves and want-blocks to the appropriate peers -func (sws *sessionWantSender) sendWants(sends allWants) { - // For each peer we're sending a request to - for p, snd := range sends { - // Piggyback some other want-haves onto the request to the peer - for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) { - snd.wantHaves.Add(c) - } - - // Send the wants to the peer.
- // Note that the PeerManager ensures that we don't send duplicate - // want-haves / want-blocks to a peer, and that want-blocks take - // precedence over want-haves. - wblks := snd.wantBlocks.Keys() - whaves := snd.wantHaves.Keys() - sws.pm.SendWants(sws.ctx, p, wblks, whaves) - - // Inform the session that we've sent the wants - sws.onSend(p, wblks, whaves) - - // Record which peers we send want-block to - sws.swbt.addSentWantBlocksTo(p, wblks) - } -} - -// getPiggybackWantHaves gets the want-haves that should be piggybacked onto -// a request that we are making to send want-blocks to a peer -func (sws *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { - var whs []cid.Cid - for c := range sws.wants { - // Don't send want-have if we're already sending a want-block - // (or have previously) - if !wantBlocks.Has(c) && !sws.swbt.haveSentWantBlockTo(p, c) { - whs = append(whs, c) - } - } - return whs -} - -// newlyExhausted filters the list of keys for wants that have not already -// been marked as exhausted (all peers indicated they don't have the block) -func (sws *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { - var res []cid.Cid - for _, c := range ks { - if wi, ok := sws.wants[c]; ok { - if !wi.exhausted { - res = append(res, c) - wi.exhausted = true - } - } - } - return res -} - -// removeWant is called when the corresponding block is received -func (sws *sessionWantSender) removeWant(c cid.Cid) *wantInfo { - if wi, ok := sws.wants[c]; ok { - delete(sws.wants, c) - return wi - } - return nil -} - -// updateWantsPeerAvailability is called when the availability changes for a -// peer. It updates all the wants accordingly. -func (sws *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { - for c, wi := range sws.wants { - if isNowAvailable { - sws.updateWantBlockPresence(c, p) - } else { - wi.removePeer(p) - } - } -} - -// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given -// want / peer -func (sws *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { - wi, ok := sws.wants[c] - if !ok { - return - } - - // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the - // block presence for the peer / cid combination - if sws.bpm.PeerHasBlock(p, c) { - wi.setPeerBlockPresence(p, BPHave) - } else if sws.bpm.PeerDoesNotHaveBlock(p, c) { - wi.setPeerBlockPresence(p, BPDontHave) - } else { - wi.setPeerBlockPresence(p, BPUnknown) - } -} - -// Which peer was the want sent to -func (sws *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { - if wi, ok := sws.wants[c]; ok { - return wi.sentTo, true - } - return "", false -} - -// Record which peer the want was sent to -func (sws *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { - if wi, ok := sws.wants[c]; ok { - wi.sentTo = p - } -} - -// wantInfo keeps track of the information for a want -type wantInfo struct { - // Tracks HAVE / DONT_HAVE sent to us for the want by each peer - blockPresence map[peer.ID]BlockPresence - // The peer that we've sent a want-block to (cleared when we get a response) - sentTo peer.ID - // The "best" peer to send the want to next - bestPeer peer.ID - // Keeps track of how many hits / misses each peer has sent us for wants - // in the session - peerRspTrkr *peerResponseTracker - // true if all known peers have sent a DONT_HAVE for this want - exhausted bool -} - -// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo { -func newWantInfo(prt
*peerResponseTracker) *wantInfo { - return &wantInfo{ - blockPresence: make(map[peer.ID]BlockPresence), - peerRspTrkr: prt, - exhausted: false, - } -} - -// setPeerBlockPresence sets the block presence for the given peer -func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) { - wi.blockPresence[p] = bp - wi.calculateBestPeer() - - // If a peer informed us that it has a block then make sure the want is no - // longer flagged as exhausted (exhausted means no peers have the block) - if bp == BPHave { - wi.exhausted = false - } -} - -// removePeer deletes the given peer from the want info -func (wi *wantInfo) removePeer(p peer.ID) { - // If we were waiting to hear back from the peer that is being removed, - // clear the sentTo field so we no longer wait - if p == wi.sentTo { - wi.sentTo = "" - } - delete(wi.blockPresence, p) - wi.calculateBestPeer() -} - -// calculateBestPeer finds the best peer to send the want to next -func (wi *wantInfo) calculateBestPeer() { - // Recalculate the best peer - bestBP := BPDontHave - bestPeer := peer.ID("") - - // Find the peer with the best block presence, recording how many peers - // share the block presence - countWithBest := 0 - for p, bp := range wi.blockPresence { - if bp > bestBP { - bestBP = bp - bestPeer = p - countWithBest = 1 - } else if bp == bestBP { - countWithBest++ - } - } - wi.bestPeer = bestPeer - - // If no peer has a block presence better than DONT_HAVE, bail out - if bestPeer == "" { - return - } - - // If there was only one peer with the best block presence, we're done - if countWithBest <= 1 { - return - } - - // There were multiple peers with the best block presence, so choose one of - // them to be the best - var peersWithBest []peer.ID - for p, bp := range wi.blockPresence { - if bp == bestBP { - peersWithBest = append(peersWithBest, p) - } - } - wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest) -} diff --git a/client/internal/session/sessionwantsender_test.go b/client/internal/session/sessionwantsender_test.go deleted file mode 100644 index 733be5a4..00000000 --- a/client/internal/session/sessionwantsender_test.go +++ /dev/null @@ -1,913 +0,0 @@ -package session - -import ( - "context" - "sync" - "testing" - "time" - - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -type sentWants struct { - sync.Mutex - p peer.ID - wantHaves *cid.Set - wantBlocks *cid.Set -} - -func (sw *sentWants) add(wantBlocks []cid.Cid, wantHaves []cid.Cid) { - sw.Lock() - defer sw.Unlock() - - for _, c := range wantBlocks { - sw.wantBlocks.Add(c) - } - for _, c := range wantHaves { - if !sw.wantBlocks.Has(c) { - sw.wantHaves.Add(c) - } - } - -} -func (sw *sentWants) wantHavesKeys() []cid.Cid { - sw.Lock() - defer sw.Unlock() - return sw.wantHaves.Keys() -} -func (sw *sentWants) wantBlocksKeys() []cid.Cid { - sw.Lock() - defer sw.Unlock() - return sw.wantBlocks.Keys() -} - -type mockPeerManager struct { - lk sync.Mutex - peerSessions map[peer.ID]bspm.Session - peerSends map[peer.ID]*sentWants -} - -func newMockPeerManager() *mockPeerManager { - return &mockPeerManager{ - peerSessions: make(map[peer.ID]bspm.Session), - peerSends: make(map[peer.ID]*sentWants), - } -} - -func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) { - pm.lk.Lock() 
- defer pm.lk.Unlock() - - pm.peerSessions[p] = sess -} - -func (pm *mockPeerManager) has(p peer.ID, sid uint64) bool { - pm.lk.Lock() - defer pm.lk.Unlock() - - if session, ok := pm.peerSessions[p]; ok { - return session.ID() == sid - } - return false -} - -func (*mockPeerManager) UnregisterSession(uint64) {} -func (*mockPeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} -func (*mockPeerManager) SendCancels(context.Context, []cid.Cid) {} - -func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - pm.lk.Lock() - defer pm.lk.Unlock() - - sw, ok := pm.peerSends[p] - if !ok { - sw = &sentWants{p: p, wantHaves: cid.NewSet(), wantBlocks: cid.NewSet()} - pm.peerSends[p] = sw - } - sw.add(wantBlocks, wantHaves) -} - -func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { - time.Sleep(10 * time.Millisecond) - - pm.lk.Lock() - defer pm.lk.Unlock() - nw := make(map[peer.ID]*sentWants) - for p, sentWants := range pm.peerSends { - nw[p] = sentWants - } - return nw -} - -func (pm *mockPeerManager) clearWants() { - pm.lk.Lock() - defer pm.lk.Unlock() - - for p := range pm.peerSends { - delete(pm.peerSends, p) - } -} - -type exhaustedPeers struct { - lk sync.Mutex - ks []cid.Cid -} - -func (ep *exhaustedPeers) onPeersExhausted(ks []cid.Cid) { - ep.lk.Lock() - defer ep.lk.Unlock() - - ep.ks = append(ep.ks, ks...) -} - -func (ep *exhaustedPeers) clear() { - ep.lk.Lock() - defer ep.lk.Unlock() - - ep.ks = nil -} - -func (ep *exhaustedPeers) exhausted() []cid.Cid { - ep.lk.Lock() - defer ep.lk.Unlock() - - return append([]cid.Cid{}, ep.ks...) -} - -func TestSendWants(t *testing.T) { - cids := testutil.GenerateCids(4) - peers := testutil.GeneratePeers(1) - peerA := peers[0] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // add cid0, cid1 - blkCids0 := cids[0:2] - spm.Add(blkCids0) - // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // Wait for processing to complete - peerSends := pm.waitNextWants() - - // Should have sent - // peerA: want-block cid0, cid1 - sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { - t.Fatal("Wrong keys") - } - if len(sw.wantHavesKeys()) > 0 { - t.Fatal("Expecting no want-haves") - } -} - -func TestSendsWantBlockToOnePeerOnly(t *testing.T) { - cids := testutil.GenerateCids(4) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // add cid0, cid1 - blkCids0 := cids[0:2] - spm.Add(blkCids0) - // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // Wait for processing to complete - peerSends := pm.waitNextWants() - - // Should have sent - // peerA: want-block cid0, cid1 - sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if 
!testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { - t.Fatal("Wrong keys") - } - - // Clear wants (makes keeping track of what's been sent easier) - pm.clearWants() - - // peerB: HAVE cid0 - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // Wait for processing to complete - peerSends = pm.waitNextWants() - - // Have not received response from peerA, so should not send want-block to - // peerB. Should have sent - // peerB: want-have cid0, cid1 - sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - if sw.wantBlocks.Len() > 0 { - t.Fatal("Expecting no want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantHavesKeys(), blkCids0) { - t.Fatal("Wrong keys") - } -} - -func TestReceiveBlock(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // add cid0, cid1 - spm.Add(cids) - // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // Wait for processing to complete - peerSends := pm.waitNextWants() - - // Should have sent - // peerA: want-block cid0, cid1 - sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } - - // Clear wants (makes keeping track of what's been sent easier) - pm.clearWants() - - // peerA: block cid0, DONT_HAVE cid1 - bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) - spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}) - // peerB: HAVE cid0, cid1 - bpm.ReceiveFrom(peerB, cids, []cid.Cid{}) - spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}) - - // Wait for processing to complete - peerSends = pm.waitNextWants() - - // Should have sent - // peerB: want-block cid1 - // (should not have sent want-block for cid0 because block0 has already - // been received) - sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - wb := sw.wantBlocksKeys() - if len(wb) != 1 || !wb[0].Equals(cids[1]) { - t.Fatal("Wrong keys", wb) - } -} - -func TestCancelWants(t *testing.T) { - cids := testutil.GenerateCids(4) - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // add cid0, cid1, cid2 - blkCids := cids[0:3] - spm.Add(blkCids) - - time.Sleep(5 * time.Millisecond) - - // cancel cid0, cid2 - cancelCids := []cid.Cid{cids[0], cids[2]} - spm.Cancel(cancelCids) - - // Wait for processing to complete - time.Sleep(5 * time.Millisecond) - - // Should have sent cancels for cid0, cid2 - sent := swc.cancelled() - if !testutil.MatchKeysIgnoreOrder(sent, cancelCids) { - t.Fatal("Wrong keys") - } -} - -func TestRegisterSessionWithPeerManager(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := 
newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // peerA: HAVE cid0 - spm.Update(peerA, nil, cids[:1], nil) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Expect session to have been registered with PeerManager - if !pm.has(peerA, sid) { - t.Fatal("Expected HAVE to register session with PeerManager") - } - - // peerB: block cid1 - spm.Update(peerB, cids[1:], nil, nil) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Expect session to have been registered with PeerManager - if !pm.has(peerB, sid) { - t.Fatal("Expected HAVE to register session with PeerManager") - } -} - -func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(3) - peerA := peers[0] - peerB := peers[1] - peerC := peers[2] - sid := uint64(1) - pm := newMockPeerManager() - fpt := newFakePeerTagger() - fpm := bsspm.New(1, fpt) - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // add cid0 - spm.Add(cids[:1]) - - // peerA: block cid0 - spm.Update(peerA, cids[:1], nil, nil) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Expect peer A to be protected as it was first to send the block - if !fpt.isProtected(peerA) { - t.Fatal("Expected first peer to send block to have protected connection") - } - - // peerB: block cid0 - spm.Update(peerB, cids[:1], nil, nil) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Expect peer B not to be protected as it was not first to send the block - if fpt.isProtected(peerB) { - t.Fatal("Expected peer not to be protected") - } - - // peerC: block cid1 - spm.Update(peerC, cids[1:], nil, nil) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Expect peer C not to be protected as we didn't want the block it sent - if fpt.isProtected(peerC) { - t.Fatal("Expected peer not to be protected") - } -} - -func TestPeerUnavailable(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // add cid0, cid1 - spm.Add(cids) - // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // Wait for processing to complete - peerSends := pm.waitNextWants() - - // Should have sent - // peerA: want-block cid0, cid1 - sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } - - // Clear wants (makes keeping track of what's been sent easier) - pm.clearWants() - - // peerB: HAVE cid0 - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // Wait for processing to 
complete - peerSends = pm.waitNextWants() - - // Should not have sent anything because want-blocks were already sent to - // peer A - sw, ok = peerSends[peerB] - if ok && sw.wantBlocks.Len() > 0 { - t.Fatal("Expected no wants sent to peer") - } - - // peerA becomes unavailable - spm.SignalAvailability(peerA, false) - - // Wait for processing to complete - peerSends = pm.waitNextWants() - - // Should now have sent want-block cid0, cid1 to peerB - sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } -} - -func TestPeersExhausted(t *testing.T) { - cids := testutil.GenerateCids(3) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - - ep := exhaustedPeers{} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) - - go spm.Run() - - // add cid0, cid1 - spm.Add(cids) - - // peerA: HAVE cid0 - bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) - // Note: this also registers peer A as being available - spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) - - // peerA: DONT_HAVE cid1 - bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) - spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}) - - time.Sleep(5 * time.Millisecond) - - // All available peers (peer A) have sent us a DONT_HAVE for cid1, - // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { - t.Fatal("Wrong keys") - } - - // Clear exhausted cids - ep.clear() - - // peerB: HAVE cid0 - bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) - // Note: this also registers peer B as being available - spm.Update(peerB, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) - - // peerB: DONT_HAVE cid1, cid2 - bpm.ReceiveFrom(peerB, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) - spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) - - // Wait for processing to complete - pm.waitNextWants() - - // All available peers (peer A and peer B) have sent us a DONT_HAVE - // for cid1, but we already called onPeersExhausted with cid1, so it - // should not be called again - if len(ep.exhausted()) > 0 { - t.Fatal("Wrong keys") - } - - // peerA: DONT_HAVE cid2 - bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[2]}) - spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[2]}) - - // Wait for processing to complete - pm.waitNextWants() - - // All available peers (peer A and peer B) have sent us a DONT_HAVE for - // cid2, so expect that onPeersExhausted() will be called with cid2 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[2]}) { - t.Fatal("Wrong keys") - } -} - -// Tests that when -// - all the peers except one have sent a DONT_HAVE for a CID -// - the remaining peer becomes unavailable -// onPeersExhausted should be sent for that CID -func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - - ep := exhaustedPeers{} - spm := newSessionWantSender(sid, pm, fpm, 
swc, bpm, onSend, ep.onPeersExhausted) - - go spm.Run() - - // add cid0, cid1 - spm.Add(cids) - - // peerA: HAVE cid0 - bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) - // Note: this also registers peer A as being available - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - // peerB: HAVE cid0 - bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) - // Note: this also registers peer B as being available - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - // peerA: DONT_HAVE cid1 - bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) - spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}) - - time.Sleep(5 * time.Millisecond) - - // peerB: becomes unavailable - spm.SignalAvailability(peerB, false) - - time.Sleep(5 * time.Millisecond) - - // All remaining peers (peer A) have sent us a DONT_HAVE for cid1, - // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { - t.Fatal("Wrong keys") - } -} - -// Tests that when all the peers are removed from the session -// onPeersExhausted should be called with all outstanding CIDs -func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { - cids := testutil.GenerateCids(3) - peers := testutil.GeneratePeers(2) - peerA := peers[0] - peerB := peers[1] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - - ep := exhaustedPeers{} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) - - go spm.Run() - - // add cid0, cid1, cid2 - spm.Add(cids) - - // peerA: receive block for cid0 (and register peer A with sessionWantSender) - spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) - // peerB: HAVE cid0 - bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) - // Note: this also registers peer B as being available - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) - - time.Sleep(5 * time.Millisecond) - - // peerA and peerB: become unavailable - spm.SignalAvailability(peerA, false) - spm.SignalAvailability(peerB, false) - - time.Sleep(5 * time.Millisecond) - - // Expect that onPeersExhausted() will be called with all cids for blocks - // that have not been received - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1], cids[2]}) { - t.Fatal("Wrong keys") - } -} - -func TestConsecutiveDontHaveLimit(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // Add all cids as wants - spm.Add(cids) - - // Receive a block from peer (adds it to the session) - spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - // Receive DONT_HAVEs from peer that do not exceed limit - for _, c := range cids[1:peerDontHaveLimit] { - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to
complete - time.Sleep(20 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - // Receive DONT_HAVEs from peer that exceed limit - for _, c := range cids[peerDontHaveLimit:] { - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to complete - time.Sleep(20 * time.Millisecond) - - // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } -} - -func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // Add all cids as wants - spm.Add(cids) - - // Receive a block from peer (adds it to the session) - spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) - - // Wait for processing to complete - time.Sleep(5 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - // Receive DONT_HAVE then HAVE then DONT_HAVE from peer, - // where consecutive DONT_HAVEs would have exceeded limit - // (but they are not consecutive) - for _, c := range cids[1:peerDontHaveLimit] { - // DONT_HAVEs - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - for _, c := range cids[peerDontHaveLimit : peerDontHaveLimit+1] { - // HAVEs - bpm.ReceiveFrom(p, []cid.Cid{c}, []cid.Cid{}) - spm.Update(p, []cid.Cid{}, []cid.Cid{c}, []cid.Cid{}) - } - for _, c := range cids[peerDontHaveLimit+1:] { - // DONT_HAVEs - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to complete - time.Sleep(5 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } -} - -func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // Add all cids as wants - spm.Add(cids) - - // Receive a block from peer (adds it to the session) - spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) - - // Wait for processing to complete - time.Sleep(5 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - // Receive DONT_HAVEs from peer that exceed limit - for _, c := range cids[1 : peerDontHaveLimit+2] { - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } - - 
// Receive a HAVE from peer (adds it back into the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - cids2 := testutil.GenerateCids(peerDontHaveLimit + 10) - - // Receive DONT_HAVEs from peer that don't exceed limit - for _, c := range cids2[1:peerDontHaveLimit] { - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - // Receive DONT_HAVEs from peer that exceed limit - for _, c := range cids2[peerDontHaveLimit:] { - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } -} - -func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) - pm := newMockPeerManager() - fpm := newFakeSessionPeerManager() - swc := newMockSessionMgr() - bpm := bsbpm.New() - onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) - defer spm.Shutdown() - - go spm.Run() - - // Add all cids as wants - spm.Add(cids) - - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) - - // Wait for processing to complete - time.Sleep(10 * time.Millisecond) - - // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } - - // Receive DONT_HAVEs from peer that exceed limit - for _, c := range cids[1 : peerDontHaveLimit+5] { - bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) - } - - // Wait for processing to complete - time.Sleep(20 * time.Millisecond) - - // Peer should still be available because it has a block that we want. 
- // (We received a HAVE for cid 0 but didn't yet receive the block) - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } -} diff --git a/client/internal/session/wantinfo_test.go b/client/internal/session/wantinfo_test.go deleted file mode 100644 index 8397d81f..00000000 --- a/client/internal/session/wantinfo_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package session - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" -) - -func TestEmptyWantInfo(t *testing.T) { - wp := newWantInfo(newPeerResponseTracker()) - - if wp.bestPeer != "" { - t.Fatal("expected no best peer") - } -} - -func TestSetPeerBlockPresence(t *testing.T) { - peers := testutil.GeneratePeers(2) - wp := newWantInfo(newPeerResponseTracker()) - - wp.setPeerBlockPresence(peers[0], BPUnknown) - if wp.bestPeer != peers[0] { - t.Fatal("wrong best peer") - } - - wp.setPeerBlockPresence(peers[1], BPHave) - if wp.bestPeer != peers[1] { - t.Fatal("wrong best peer") - } - - wp.setPeerBlockPresence(peers[0], BPDontHave) - if wp.bestPeer != peers[1] { - t.Fatal("wrong best peer") - } -} - -func TestSetPeerBlockPresenceBestLower(t *testing.T) { - peers := testutil.GeneratePeers(2) - wp := newWantInfo(newPeerResponseTracker()) - - wp.setPeerBlockPresence(peers[0], BPHave) - if wp.bestPeer != peers[0] { - t.Fatal("wrong best peer") - } - - wp.setPeerBlockPresence(peers[1], BPUnknown) - if wp.bestPeer != peers[0] { - t.Fatal("wrong best peer") - } - - wp.setPeerBlockPresence(peers[0], BPDontHave) - if wp.bestPeer != peers[1] { - t.Fatal("wrong best peer") - } -} - -func TestRemoveThenSetDontHave(t *testing.T) { - peers := testutil.GeneratePeers(2) - wp := newWantInfo(newPeerResponseTracker()) - - wp.setPeerBlockPresence(peers[0], BPUnknown) - if wp.bestPeer != peers[0] { - t.Fatal("wrong best peer") - } - - wp.removePeer(peers[0]) - if wp.bestPeer != "" { - t.Fatal("wrong best peer") - } - - wp.setPeerBlockPresence(peers[1], BPUnknown) - if wp.bestPeer != peers[1] { - t.Fatal("wrong best peer") - } - - wp.setPeerBlockPresence(peers[0], BPDontHave) - if wp.bestPeer != peers[1] { - t.Fatal("wrong best peer") - } -} diff --git a/client/internal/sessioninterestmanager/sessioninterestmanager.go b/client/internal/sessioninterestmanager/sessioninterestmanager.go deleted file mode 100644 index 0ab32ed1..00000000 --- a/client/internal/sessioninterestmanager/sessioninterestmanager.go +++ /dev/null @@ -1,201 +0,0 @@ -package sessioninterestmanager - -import ( - "sync" - - blocks "github.com/ipfs/go-block-format" - - cid "github.com/ipfs/go-cid" -) - -// SessionInterestManager records the CIDs that each session is interested in. -type SessionInterestManager struct { - lk sync.RWMutex - wants map[cid.Cid]map[uint64]bool -} - -// New initializes a new SessionInterestManager. -func New() *SessionInterestManager { - return &SessionInterestManager{ - // Map of cids -> sessions -> bool - // - // The boolean indicates whether the session still wants the block - // or is just interested in receiving messages about it. - // - // Note that once the block is received the session no longer wants - // the block, but still wants to receive messages from peers who have - // the block as they may have other blocks the session is interested in. - wants: make(map[cid.Cid]map[uint64]bool), - } -} - -// When the client asks the session for blocks, the session calls -// RecordSessionInterest() with those cids. 
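The comments above describe SessionInterestManager's core structure: a cid -> session -> bool map where true means the session still wants the block and false means it only wants to keep hearing about it. A small self-contained sketch of that distinction (simplified types, not the manager's actual code):

package main

import "fmt"

type sessionID = uint64

// wants maps a key to the sessions tracking it; the bool distinguishes
// "still wants the block" (true) from "only interested in messages" (false).
type interestSketch struct {
	wants map[string]map[sessionID]bool
}

func (s *interestSketch) recordInterest(ses sessionID, keys ...string) {
	for _, k := range keys {
		if s.wants[k] == nil {
			s.wants[k] = make(map[sessionID]bool)
		}
		s.wants[k][ses] = true
	}
}

// markReceived flips the flag once the block arrives: the session no longer
// wants the block but stays interested in peers that announced it.
func (s *interestSketch) markReceived(ses sessionID, key string) {
	if wanted, ok := s.wants[key][ses]; ok && wanted {
		s.wants[key][ses] = false
	}
}

func main() {
	s := &interestSketch{wants: make(map[string]map[sessionID]bool)}
	s.recordInterest(1, "cid-a")
	s.markReceived(1, "cid-a")
	fmt.Println("session 1 still wants cid-a:", s.wants["cid-a"][1]) // false
	_, interested := s.wants["cid-a"][1]
	fmt.Println("session 1 still interested in cid-a:", interested) // true
}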
-func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { - sim.lk.Lock() - defer sim.lk.Unlock() - - // For each key - for _, c := range ks { - // Record that the session wants the blocks - if want, ok := sim.wants[c]; ok { - want[ses] = true - } else { - sim.wants[c] = map[uint64]bool{ses: true} - } - } -} - -// When the session shuts down it calls RemoveSessionInterest(). -// Returns the keys that no session is interested in any more. -func (sim *SessionInterestManager) RemoveSession(ses uint64) []cid.Cid { - sim.lk.Lock() - defer sim.lk.Unlock() - - // The keys that no session is interested in - deletedKs := make([]cid.Cid, 0) - - // For each known key - for c := range sim.wants { - // Remove the session from the list of sessions that want the key - delete(sim.wants[c], ses) - - // If there are no more sessions that want the key - if len(sim.wants[c]) == 0 { - // Clean up the list memory - delete(sim.wants, c) - // Add the key to the list of keys that no session is interested in - deletedKs = append(deletedKs, c) - } - } - - return deletedKs -} - -// When the session receives blocks, it calls RemoveSessionWants(). -func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, ks []cid.Cid) { - sim.lk.Lock() - defer sim.lk.Unlock() - - // For each key - for _, c := range ks { - // If the session wanted the block - if wanted, ok := sim.wants[c][ses]; ok && wanted { - // Mark the block as unwanted - sim.wants[c][ses] = false - } - } -} - -// When a request is cancelled, the session calls RemoveSessionInterested(). -// Returns the keys that no session is interested in any more. -func (sim *SessionInterestManager) RemoveSessionInterested(ses uint64, ks []cid.Cid) []cid.Cid { - sim.lk.Lock() - defer sim.lk.Unlock() - - // The keys that no session is interested in - deletedKs := make([]cid.Cid, 0, len(ks)) - - // For each key - for _, c := range ks { - // If there is a list of sessions that want the key - if _, ok := sim.wants[c]; ok { - // Remove the session from the list of sessions that want the key - delete(sim.wants[c], ses) - - // If there are no more sessions that want the key - if len(sim.wants[c]) == 0 { - // Clean up the list memory - delete(sim.wants, c) - // Add the key to the list of keys that no session is interested in - deletedKs = append(deletedKs, c) - } - } - } - - return deletedKs -} - -// The session calls FilterSessionInterested() to filter the sets of keys for -// those that the session is interested in -func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { - sim.lk.RLock() - defer sim.lk.RUnlock() - - // For each set of keys - kres := make([][]cid.Cid, len(ksets)) - for i, ks := range ksets { - // The set of keys that at least one session is interested in - has := make([]cid.Cid, 0, len(ks)) - - // For each key in the list - for _, c := range ks { - // If there is a session that's interested, add the key to the set - if _, ok := sim.wants[c][ses]; ok { - has = append(has, c) - } - } - kres[i] = has - } - return kres -} - -// When bitswap receives blocks it calls SplitWantedUnwanted() to discard -// unwanted blocks -func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { - sim.lk.RLock() - defer sim.lk.RUnlock() - - // Get the wanted block keys as a set - wantedKs := cid.NewSet() - for _, b := range blks { - c := b.Cid() - // For each session that is interested in the key - for ses := range sim.wants[c] { - // If the session wants the 
key (rather than just being interested) - if wanted, ok := sim.wants[c][ses]; ok && wanted { - // Add the key to the set - wantedKs.Add(c) - } - } - } - - // Separate the blocks into wanted and unwanted - wantedBlks := make([]blocks.Block, 0, len(blks)) - notWantedBlks := make([]blocks.Block, 0) - for _, b := range blks { - if wantedKs.Has(b.Cid()) { - wantedBlks = append(wantedBlks, b) - } else { - notWantedBlks = append(notWantedBlks, b) - } - } - return wantedBlks, notWantedBlks -} - -// When the SessionManager receives a message it calls InterestedSessions() to -// find out which sessions are interested in the message. -func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { - sim.lk.RLock() - defer sim.lk.RUnlock() - - ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) - ks = append(ks, blks...) - ks = append(ks, haves...) - ks = append(ks, dontHaves...) - - // Create a set of sessions that are interested in the keys - sesSet := make(map[uint64]struct{}) - for _, c := range ks { - for s := range sim.wants[c] { - sesSet[s] = struct{}{} - } - } - - // Convert the set into a list - ses := make([]uint64, 0, len(sesSet)) - for s := range sesSet { - ses = append(ses, s) - } - return ses -} diff --git a/client/internal/sessioninterestmanager/sessioninterestmanager_test.go b/client/internal/sessioninterestmanager/sessioninterestmanager_test.go deleted file mode 100644 index 0bba6638..00000000 --- a/client/internal/sessioninterestmanager/sessioninterestmanager_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package sessioninterestmanager - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" -) - -func TestEmpty(t *testing.T) { - sim := New() - - ses := uint64(1) - cids := testutil.GenerateCids(2) - res := sim.FilterSessionInterested(ses, cids) - if len(res) != 1 || len(res[0]) > 0 { - t.Fatal("Expected no interest") - } - if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) > 0 { - t.Fatal("Expected no interest") - } -} - -func TestBasic(t *testing.T) { - sim := New() - - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) - sim.RecordSessionInterest(ses1, cids1) - - res := sim.FilterSessionInterested(ses1, cids1) - if len(res) != 1 || len(res[0]) != 2 { - t.Fatal("Expected 2 keys") - } - if len(sim.InterestedSessions(cids1, []cid.Cid{}, []cid.Cid{})) != 1 { - t.Fatal("Expected 1 session") - } - - sim.RecordSessionInterest(ses2, cids2) - res = sim.FilterSessionInterested(ses2, cids1[:1]) - if len(res) != 1 || len(res[0]) != 0 { - t.Fatal("Expected no interest") - } - res = sim.FilterSessionInterested(ses2, cids2) - if len(res) != 1 || len(res[0]) != 2 { - t.Fatal("Expected 2 keys") - } - - if len(sim.InterestedSessions(cids1[:1], []cid.Cid{}, []cid.Cid{})) != 1 { - t.Fatal("Expected 1 session") - } - if len(sim.InterestedSessions(cids1[1:], []cid.Cid{}, []cid.Cid{})) != 2 { - t.Fatal("Expected 2 sessions") - } -} - -func TestInterestedSessions(t *testing.T) { - sim := New() - - ses := uint64(1) - cids := testutil.GenerateCids(3) - sim.RecordSessionInterest(ses, cids[0:2]) - - if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) != 1 { - t.Fatal("Expected 1 session") - } - if len(sim.InterestedSessions(cids[0:1], []cid.Cid{}, []cid.Cid{})) != 1 { - t.Fatal("Expected 1 session") - } - if len(sim.InterestedSessions([]cid.Cid{}, cids, []cid.Cid{})) != 1 { - t.Fatal("Expected 1 session") 
- } - if len(sim.InterestedSessions([]cid.Cid{}, cids[0:1], []cid.Cid{})) != 1 { - t.Fatal("Expected 1 session") - } - if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids)) != 1 { - t.Fatal("Expected 1 session") - } - if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids[0:1])) != 1 { - t.Fatal("Expected 1 session") - } -} - -func TestRemoveSession(t *testing.T) { - sim := New() - - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) - sim.RecordSessionInterest(ses1, cids1) - sim.RecordSessionInterest(ses2, cids2) - sim.RemoveSession(ses1) - - res := sim.FilterSessionInterested(ses1, cids1) - if len(res) != 1 || len(res[0]) != 0 { - t.Fatal("Expected no interest") - } - - res = sim.FilterSessionInterested(ses2, cids1, cids2) - if len(res) != 2 { - t.Fatal("unexpected results size") - } - if len(res[0]) != 1 { - t.Fatal("Expected 1 key") - } - if len(res[1]) != 2 { - t.Fatal("Expected 2 keys") - } -} - -func TestRemoveSessionInterested(t *testing.T) { - sim := New() - - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) - sim.RecordSessionInterest(ses1, cids1) - sim.RecordSessionInterest(ses2, cids2) - - res := sim.RemoveSessionInterested(ses1, []cid.Cid{cids1[0]}) - if len(res) != 1 { - t.Fatal("Expected no interested sessions left") - } - - interested := sim.FilterSessionInterested(ses1, cids1) - if len(interested) != 1 || len(interested[0]) != 1 { - t.Fatal("Expected ses1 still interested in one cid") - } - - res = sim.RemoveSessionInterested(ses1, cids1) - if len(res) != 0 { - t.Fatal("Expected ses2 to be interested in one cid") - } - - interested = sim.FilterSessionInterested(ses1, cids1) - if len(interested) != 1 || len(interested[0]) != 0 { - t.Fatal("Expected ses1 to have no remaining interest") - } - - interested = sim.FilterSessionInterested(ses2, cids1) - if len(interested) != 1 || len(interested[0]) != 1 { - t.Fatal("Expected ses2 to still be interested in one key") - } -} - -func TestSplitWantedUnwanted(t *testing.T) { - blks := testutil.GenerateBlocksOfSize(3, 1024) - sim := New() - ses1 := uint64(1) - ses2 := uint64(2) - - var cids []cid.Cid - for _, b := range blks { - cids = append(cids, b.Cid()) - } - - // ses1: - // ses2: - wanted, unwanted := sim.SplitWantedUnwanted(blks) - if len(wanted) > 0 { - t.Fatal("Expected no blocks") - } - if len(unwanted) != 3 { - t.Fatal("Expected 3 blocks") - } - - // ses1: 0 1 - // ses2: - sim.RecordSessionInterest(ses1, cids[0:2]) - wanted, unwanted = sim.SplitWantedUnwanted(blks) - if len(wanted) != 2 { - t.Fatal("Expected 2 blocks") - } - if len(unwanted) != 1 { - t.Fatal("Expected 1 block") - } - - // ses1: 1 - // ses2: 1 2 - sim.RecordSessionInterest(ses2, cids[1:]) - sim.RemoveSessionWants(ses1, cids[:1]) - - wanted, unwanted = sim.SplitWantedUnwanted(blks) - if len(wanted) != 2 { - t.Fatal("Expected 2 blocks") - } - if len(unwanted) != 1 { - t.Fatal("Expected no blocks") - } - - // ses1: - // ses2: 1 2 - sim.RemoveSessionWants(ses1, cids[1:2]) - - wanted, unwanted = sim.SplitWantedUnwanted(blks) - if len(wanted) != 2 { - t.Fatal("Expected 2 blocks") - } - if len(unwanted) != 1 { - t.Fatal("Expected no blocks") - } - - // ses1: - // ses2: 2 - sim.RemoveSessionWants(ses2, cids[1:2]) - - wanted, unwanted = sim.SplitWantedUnwanted(blks) - if len(wanted) != 1 { - t.Fatal("Expected 2 blocks") - } - if len(unwanted) != 2 { - t.Fatal("Expected 2 blocks") - } -} diff --git 
a/client/internal/sessionmanager/sessionmanager.go b/client/internal/sessionmanager/sessionmanager.go deleted file mode 100644 index 5ac7a8a0..00000000 --- a/client/internal/sessionmanager/sessionmanager.go +++ /dev/null @@ -1,196 +0,0 @@ -package sessionmanager - -import ( - "context" - "strconv" - "sync" - "time" - - cid "github.com/ipfs/go-cid" - delay "github.com/ipfs/go-ipfs-delay" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/ipfs/go-bitswap/client/internal" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bssession "github.com/ipfs/go-bitswap/client/internal/session" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// Session is a session that is managed by the session manager -type Session interface { - exchange.Fetcher - ID() uint64 - ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) - Shutdown() -} - -// SessionFactory generates a new session for the SessionManager to track. -type SessionFactory func( - ctx context.Context, - sm bssession.SessionManager, - id uint64, - sprm bssession.SessionPeerManager, - sim *bssim.SessionInterestManager, - pm bssession.PeerManager, - bpm *bsbpm.BlockPresenceManager, - notif notifications.PubSub, - provSearchDelay time.Duration, - rebroadcastDelay delay.D, - self peer.ID) Session - -// PeerManagerFactory generates a new peer manager for a session. -type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager - -// SessionManager is responsible for creating, managing, and dispatching to -// sessions. -type SessionManager struct { - ctx context.Context - sessionFactory SessionFactory - sessionInterestManager *bssim.SessionInterestManager - peerManagerFactory PeerManagerFactory - blockPresenceManager *bsbpm.BlockPresenceManager - peerManager bssession.PeerManager - notif notifications.PubSub - - // Sessions - sessLk sync.RWMutex - sessions map[uint64]Session - - // Session Index - sessIDLk sync.Mutex - sessID uint64 - - self peer.ID -} - -// New creates a new SessionManager. -func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, - blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { - - return &SessionManager{ - ctx: ctx, - sessionFactory: sessionFactory, - sessionInterestManager: sessionInterestManager, - peerManagerFactory: peerManagerFactory, - blockPresenceManager: blockPresenceManager, - peerManager: peerManager, - notif: notif, - sessions: make(map[uint64]Session), - self: self, - } -} - -// NewSession initializes a session with the given context, and adds to the -// session manager. 
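NewSession, RemoveSession and ReceiveFrom below all guard on sm.sessions being non-nil; Shutdown sets the map to nil so that late registrations and deliveries become no-ops. A stripped-down sketch of that shutdown-sentinel pattern (illustrative only, hypothetical names):

package main

import (
	"fmt"
	"sync"
)

type registrySketch struct {
	mu       sync.Mutex
	sessions map[uint64]string // nil means "shut down"
	nextID   uint64
}

func (r *registrySketch) newSession(name string) uint64 {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.nextID++
	if r.sessions != nil { // ignore registrations after shutdown
		r.sessions[r.nextID] = name
	}
	return r.nextID
}

func (r *registrySketch) shutdown() {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.sessions = nil // later newSession/deliver calls see the sentinel and do nothing
}

func (r *registrySketch) deliver(id uint64) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	_, ok := r.sessions[id] // lookups on a nil map are safe and return false
	return ok
}

func main() {
	r := &registrySketch{sessions: make(map[uint64]string)}
	id := r.newSession("s1")
	fmt.Println(r.deliver(id)) // true
	r.shutdown()
	fmt.Println(r.deliver(id)) // false: delivery after shutdown is dropped
}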
-func (sm *SessionManager) NewSession(ctx context.Context, - provSearchDelay time.Duration, - rebroadcastDelay delay.D) exchange.Fetcher { - id := sm.GetNextSessionID() - - ctx, span := internal.StartSpan(ctx, "SessionManager.NewSession", trace.WithAttributes(attribute.String("ID", strconv.FormatUint(id, 10)))) - defer span.End() - - pm := sm.peerManagerFactory(ctx, id) - session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) - - sm.sessLk.Lock() - if sm.sessions != nil { // check if SessionManager was shutdown - sm.sessions[id] = session - } - sm.sessLk.Unlock() - - return session -} - -func (sm *SessionManager) Shutdown() { - sm.sessLk.Lock() - - sessions := make([]Session, 0, len(sm.sessions)) - for _, ses := range sm.sessions { - sessions = append(sessions, ses) - } - - // Ensure that if Shutdown() is called twice we only shut down - // the sessions once - sm.sessions = nil - - sm.sessLk.Unlock() - - for _, ses := range sessions { - ses.Shutdown() - } -} - -func (sm *SessionManager) RemoveSession(sesid uint64) { - // Remove session from SessionInterestManager - returns the keys that no - // session is interested in anymore. - cancelKs := sm.sessionInterestManager.RemoveSession(sesid) - - // Cancel keys that no session is interested in anymore - sm.cancelWants(cancelKs) - - sm.sessLk.Lock() - defer sm.sessLk.Unlock() - - // Clean up session - if sm.sessions != nil { // check if SessionManager was shutdown - delete(sm.sessions, sesid) - } -} - -// GetNextSessionID returns the next sequential identifier for a session. -func (sm *SessionManager) GetNextSessionID() uint64 { - sm.sessIDLk.Lock() - defer sm.sessIDLk.Unlock() - - sm.sessID++ - return sm.sessID -} - -// ReceiveFrom is called when a new message is received -func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { - // Record block presence for HAVE / DONT_HAVE - sm.blockPresenceManager.ReceiveFrom(p, haves, dontHaves) - - // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs - for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { - sm.sessLk.RLock() - if sm.sessions == nil { // check if SessionManager was shutdown - sm.sessLk.RUnlock() - return - } - sess, ok := sm.sessions[id] - sm.sessLk.RUnlock() - - if ok { - sess.ReceiveFrom(p, blks, haves, dontHaves) - } - } - - // Send CANCEL to all peers with want-have / want-block - sm.peerManager.SendCancels(ctx, blks) -} - -// CancelSessionWants is called when a session cancels wants because a call to -// GetBlocks() is cancelled -func (sm *SessionManager) CancelSessionWants(sesid uint64, wants []cid.Cid) { - // Remove session's interest in the given blocks - returns the keys that no - // session is interested in anymore. - cancelKs := sm.sessionInterestManager.RemoveSessionInterested(sesid, wants) - sm.cancelWants(cancelKs) -} - -func (sm *SessionManager) cancelWants(wants []cid.Cid) { - // Free up block presence tracking for keys that no session is interested - // in anymore - sm.blockPresenceManager.RemoveKeys(wants) - - // Send CANCEL to all peers for blocks that no session is interested in - // anymore. - // Note: use bitswap context because session context may already be Done. 
- sm.peerManager.SendCancels(sm.ctx, wants) -} diff --git a/client/internal/sessionmanager/sessionmanager_test.go b/client/internal/sessionmanager/sessionmanager_test.go deleted file mode 100644 index c22028d3..00000000 --- a/client/internal/sessionmanager/sessionmanager_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package sessionmanager - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - delay "github.com/ipfs/go-ipfs-delay" - - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bssession "github.com/ipfs/go-bitswap/client/internal/session" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - "github.com/ipfs/go-bitswap/internal/testutil" - - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -type fakeSession struct { - ks []cid.Cid - wantBlocks []cid.Cid - wantHaves []cid.Cid - id uint64 - pm *fakeSesPeerManager - sm bssession.SessionManager - notif notifications.PubSub -} - -func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { - return nil, nil -} -func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { - return nil, nil -} -func (fs *fakeSession) ID() uint64 { - return fs.id -} -func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - fs.ks = append(fs.ks, ks...) - fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) - fs.wantHaves = append(fs.wantHaves, wantHaves...) -} -func (fs *fakeSession) Shutdown() { - fs.sm.RemoveSession(fs.id) -} - -type fakeSesPeerManager struct { -} - -func (*fakeSesPeerManager) Peers() []peer.ID { return nil } -func (*fakeSesPeerManager) PeersDiscovered() bool { return false } -func (*fakeSesPeerManager) Shutdown() {} -func (*fakeSesPeerManager) AddPeer(peer.ID) bool { return false } -func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } -func (*fakeSesPeerManager) HasPeers() bool { return false } -func (*fakeSesPeerManager) ProtectConnection(peer.ID) {} - -type fakePeerManager struct { - lk sync.Mutex - cancels []cid.Cid -} - -func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} -func (*fakePeerManager) UnregisterSession(uint64) {} -func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} -func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} -func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { - fpm.lk.Lock() - defer fpm.lk.Unlock() - fpm.cancels = append(fpm.cancels, cancels...) 
-} -func (fpm *fakePeerManager) cancelled() []cid.Cid { - fpm.lk.Lock() - defer fpm.lk.Unlock() - return fpm.cancels -} - -func sessionFactory(ctx context.Context, - sm bssession.SessionManager, - id uint64, - sprm bssession.SessionPeerManager, - sim *bssim.SessionInterestManager, - pm bssession.PeerManager, - bpm *bsbpm.BlockPresenceManager, - notif notifications.PubSub, - provSearchDelay time.Duration, - rebroadcastDelay delay.D, - self peer.ID) Session { - fs := &fakeSession{ - id: id, - pm: sprm.(*fakeSesPeerManager), - sm: sm, - notif: notif, - } - go func() { - <-ctx.Done() - sm.RemoveSession(fs.id) - }() - return fs -} - -func peerManagerFactory(ctx context.Context, id uint64) bssession.SessionPeerManager { - return &fakeSesPeerManager{} -} - -func TestReceiveFrom(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sim := bssim.New() - bpm := bsbpm.New() - pm := &fakePeerManager{} - sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - - p := peer.ID(fmt.Sprint(123)) - block := blocks.NewBlock([]byte("block")) - - firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - - sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) - sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) - - sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) - if len(firstSession.ks) == 0 || - len(secondSession.ks) > 0 || - len(thirdSession.ks) == 0 { - t.Fatal("should have received blocks but didn't") - } - - sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) - if len(firstSession.wantBlocks) == 0 || - len(secondSession.wantBlocks) > 0 || - len(thirdSession.wantBlocks) == 0 { - t.Fatal("should have received want-blocks but didn't") - } - - sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) - if len(firstSession.wantHaves) == 0 || - len(secondSession.wantHaves) > 0 || - len(thirdSession.wantHaves) == 0 { - t.Fatal("should have received want-haves but didn't") - } - - if len(pm.cancelled()) != 1 { - t.Fatal("should have sent cancel for received blocks") - } -} - -func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sim := bssim.New() - bpm := bsbpm.New() - pm := &fakePeerManager{} - sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - - p := peer.ID(fmt.Sprint(123)) - block := blocks.NewBlock([]byte("block")) - - firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - - sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) - sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) - sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) - - sm.Shutdown() - - // wait for sessions to get removed - time.Sleep(10 * time.Millisecond) - - sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) - if len(firstSession.ks) > 0 || - 
len(secondSession.ks) > 0 || - len(thirdSession.ks) > 0 { - t.Fatal("received blocks for sessions after manager is shutdown") - } -} - -func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sim := bssim.New() - bpm := bsbpm.New() - pm := &fakePeerManager{} - sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - - p := peer.ID(fmt.Sprint(123)) - block := blocks.NewBlock([]byte("block")) - - firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - sessionCtx, sessionCancel := context.WithCancel(ctx) - secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - - sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) - sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) - sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) - - sessionCancel() - - // wait for sessions to get removed - time.Sleep(10 * time.Millisecond) - - sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) - if len(firstSession.ks) == 0 || - len(secondSession.ks) > 0 || - len(thirdSession.ks) == 0 { - t.Fatal("received blocks for sessions that are canceled") - } -} - -func TestShutdown(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sim := bssim.New() - bpm := bsbpm.New() - pm := &fakePeerManager{} - sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - - p := peer.ID(fmt.Sprint(123)) - block := blocks.NewBlock([]byte("block")) - cids := []cid.Cid{block.Cid()} - firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - sim.RecordSessionInterest(firstSession.ID(), cids) - sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, cids) - - if !bpm.HasKey(block.Cid()) { - t.Fatal("expected cid to be added to block presence manager") - } - - sm.Shutdown() - - // wait for cleanup - time.Sleep(10 * time.Millisecond) - - if bpm.HasKey(block.Cid()) { - t.Fatal("expected cid to be removed from block presence manager") - } - if !testutil.MatchKeysIgnoreOrder(pm.cancelled(), cids) { - t.Fatal("expected cancels to be sent") - } -} diff --git a/client/internal/sessionpeermanager/sessionpeermanager.go b/client/internal/sessionpeermanager/sessionpeermanager.go deleted file mode 100644 index 35784d7b..00000000 --- a/client/internal/sessionpeermanager/sessionpeermanager.go +++ /dev/null @@ -1,150 +0,0 @@ -package sessionpeermanager - -import ( - "fmt" - "sync" - - logging "github.com/ipfs/go-log" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var log = logging.Logger("bs:sprmgr") - -const ( - // Connection Manager tag value for session peers. Indicates to connection - // manager that it should keep the connection to the peer. - sessionPeerTagValue = 5 -) - -// PeerTagger is an interface for tagging peers with metadata -type PeerTagger interface { - TagPeer(peer.ID, string, int) - UntagPeer(p peer.ID, tag string) - Protect(peer.ID, string) - Unprotect(peer.ID, string) bool -} - -// SessionPeerManager keeps track of peers for a session, and takes care of -// ConnectionManager tagging. 
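SessionPeerManager's job, as the comment says, is peer bookkeeping plus ConnectionManager tagging: tag a peer when it joins the session, untag and unprotect it when it leaves or the session shuts down. A compact sketch of that lifecycle against a PeerTagger-shaped interface (simplified types, not the real manager):

package main

import "fmt"

// taggerSketch mirrors the PeerTagger shape: tag/untag plus unprotect.
type taggerSketch interface {
	TagPeer(p, tag string, val int)
	UntagPeer(p, tag string)
	Unprotect(p, tag string) bool
}

type printTagger struct{}

func (printTagger) TagPeer(p, tag string, val int) { fmt.Println("tag", p, tag, val) }
func (printTagger) UntagPeer(p, tag string)        { fmt.Println("untag", p, tag) }
func (printTagger) Unprotect(p, tag string) bool   { fmt.Println("unprotect", p, tag); return false }

type peerSetSketch struct {
	tag    string
	tagger taggerSketch
	peers  map[string]struct{}
}

func (s *peerSetSketch) add(p string) {
	if _, ok := s.peers[p]; ok {
		return
	}
	s.peers[p] = struct{}{}
	s.tagger.TagPeer(p, s.tag, 5) // keep the connection alive while the session runs
}

func (s *peerSetSketch) shutdown() {
	for p := range s.peers {
		s.tagger.UntagPeer(p, s.tag) // release the connections again
		s.tagger.Unprotect(p, s.tag)
	}
}

func main() {
	s := &peerSetSketch{tag: "bs-ses-1", tagger: printTagger{}, peers: map[string]struct{}{}}
	s.add("peerA")
	s.shutdown()
}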
-type SessionPeerManager struct { - tagger PeerTagger - tag string - - id uint64 - plk sync.RWMutex - peers map[peer.ID]struct{} - peersDiscovered bool -} - -// New creates a new SessionPeerManager -func New(id uint64, tagger PeerTagger) *SessionPeerManager { - return &SessionPeerManager{ - id: id, - tag: fmt.Sprint("bs-ses-", id), - tagger: tagger, - peers: make(map[peer.ID]struct{}), - } -} - -// AddPeer adds the peer to the SessionPeerManager. -// Returns true if the peer is a new peer, false if it already existed. -func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { - spm.plk.Lock() - defer spm.plk.Unlock() - - // Check if the peer is a new peer - if _, ok := spm.peers[p]; ok { - return false - } - - spm.peers[p] = struct{}{} - spm.peersDiscovered = true - - // Tag the peer with the ConnectionManager so it doesn't discard the - // connection - spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) - - log.Debugw("Bitswap: Added peer to session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) - return true -} - -// Protect connection to this peer from being pruned by the connection manager -func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { - spm.plk.Lock() - defer spm.plk.Unlock() - - if _, ok := spm.peers[p]; !ok { - return - } - - spm.tagger.Protect(p, spm.tag) -} - -// RemovePeer removes the peer from the SessionPeerManager. -// Returns true if the peer was removed, false if it did not exist. -func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { - spm.plk.Lock() - defer spm.plk.Unlock() - - if _, ok := spm.peers[p]; !ok { - return false - } - - delete(spm.peers, p) - spm.tagger.UntagPeer(p, spm.tag) - spm.tagger.Unprotect(p, spm.tag) - - log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) - return true -} - -// PeersDiscovered indicates whether peers have been discovered yet. -// Returns true once a peer has been discovered by the session (even if all -// peers are later removed from the session). 
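PeersDiscovered is deliberately sticky: the flag latches true the first time any peer is added and stays true even if every peer is later removed, which the session can use to decide whether it still needs to search for providers. A tiny sketch of the latch (illustrative only):

package main

import "fmt"

type discoverySketch struct {
	peers           map[string]struct{}
	peersDiscovered bool // latched on first add, never reset
}

func (d *discoverySketch) add(p string)    { d.peers[p] = struct{}{}; d.peersDiscovered = true }
func (d *discoverySketch) remove(p string) { delete(d.peers, p) }

func main() {
	d := &discoverySketch{peers: map[string]struct{}{}}
	fmt.Println(d.peersDiscovered) // false: nothing found yet
	d.add("peerA")
	d.remove("peerA")
	fmt.Println(d.peersDiscovered) // still true: discovery already happened once
}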
-func (spm *SessionPeerManager) PeersDiscovered() bool { - spm.plk.RLock() - defer spm.plk.RUnlock() - - return spm.peersDiscovered -} - -func (spm *SessionPeerManager) Peers() []peer.ID { - spm.plk.RLock() - defer spm.plk.RUnlock() - - peers := make([]peer.ID, 0, len(spm.peers)) - for p := range spm.peers { - peers = append(peers, p) - } - - return peers -} - -func (spm *SessionPeerManager) HasPeers() bool { - spm.plk.RLock() - defer spm.plk.RUnlock() - - return len(spm.peers) > 0 -} - -func (spm *SessionPeerManager) HasPeer(p peer.ID) bool { - spm.plk.RLock() - defer spm.plk.RUnlock() - - _, ok := spm.peers[p] - return ok -} - -// Shutdown untags all the peers -func (spm *SessionPeerManager) Shutdown() { - spm.plk.Lock() - defer spm.plk.Unlock() - - // Untag the peers with the ConnectionManager so that it can release - // connections to those peers - for p := range spm.peers { - spm.tagger.UntagPeer(p, spm.tag) - spm.tagger.Unprotect(p, spm.tag) - } -} diff --git a/client/internal/sessionpeermanager/sessionpeermanager_test.go b/client/internal/sessionpeermanager/sessionpeermanager_test.go deleted file mode 100644 index ac82362d..00000000 --- a/client/internal/sessionpeermanager/sessionpeermanager_test.go +++ /dev/null @@ -1,302 +0,0 @@ -package sessionpeermanager - -import ( - "sync" - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -type fakePeerTagger struct { - lk sync.Mutex - taggedPeers []peer.ID - protectedPeers map[peer.ID]map[string]struct{} - wait sync.WaitGroup -} - -func newFakePeerTagger() *fakePeerTagger { - return &fakePeerTagger{ - protectedPeers: make(map[peer.ID]map[string]struct{}), - } -} - -func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { - fpt.wait.Add(1) - - fpt.lk.Lock() - defer fpt.lk.Unlock() - fpt.taggedPeers = append(fpt.taggedPeers, p) -} - -func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { - defer fpt.wait.Done() - - fpt.lk.Lock() - defer fpt.lk.Unlock() - for i := 0; i < len(fpt.taggedPeers); i++ { - if fpt.taggedPeers[i] == p { - fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] - fpt.taggedPeers = fpt.taggedPeers[:len(fpt.taggedPeers)-1] - return - } - } -} - -func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { - fpt.lk.Lock() - defer fpt.lk.Unlock() - - tags, ok := fpt.protectedPeers[p] - if !ok { - tags = make(map[string]struct{}) - fpt.protectedPeers[p] = tags - } - tags[tag] = struct{}{} -} - -func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { - fpt.lk.Lock() - defer fpt.lk.Unlock() - - if tags, ok := fpt.protectedPeers[p]; ok { - delete(tags, tag) - if len(tags) == 0 { - delete(fpt.protectedPeers, p) - } - return len(tags) > 0 - } - - return false -} - -func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { - fpt.lk.Lock() - defer fpt.lk.Unlock() - - return len(fpt.protectedPeers[p]) > 0 -} - -func TestAddPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) - spm := New(1, &fakePeerTagger{}) - - isNew := spm.AddPeer(peers[0]) - if !isNew { - t.Fatal("Expected peer to be new") - } - - isNew = spm.AddPeer(peers[0]) - if isNew { - t.Fatal("Expected peer to no longer be new") - } - - isNew = spm.AddPeer(peers[1]) - if !isNew { - t.Fatal("Expected peer to be new") - } -} - -func TestRemovePeers(t *testing.T) { - peers := testutil.GeneratePeers(2) - spm := New(1, &fakePeerTagger{}) - - existed := spm.RemovePeer(peers[0]) - if existed { - t.Fatal("Expected peer not to exist") - } - - spm.AddPeer(peers[0]) - 
spm.AddPeer(peers[1]) - - existed = spm.RemovePeer(peers[0]) - if !existed { - t.Fatal("Expected peer to exist") - } - existed = spm.RemovePeer(peers[1]) - if !existed { - t.Fatal("Expected peer to exist") - } - existed = spm.RemovePeer(peers[0]) - if existed { - t.Fatal("Expected peer not to have existed") - } -} - -func TestHasPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) - spm := New(1, &fakePeerTagger{}) - - if spm.HasPeers() { - t.Fatal("Expected not to have peers yet") - } - - spm.AddPeer(peers[0]) - if !spm.HasPeers() { - t.Fatal("Expected to have peers") - } - - spm.AddPeer(peers[1]) - if !spm.HasPeers() { - t.Fatal("Expected to have peers") - } - - spm.RemovePeer(peers[0]) - if !spm.HasPeers() { - t.Fatal("Expected to have peers") - } - - spm.RemovePeer(peers[1]) - if spm.HasPeers() { - t.Fatal("Expected to no longer have peers") - } -} - -func TestHasPeer(t *testing.T) { - peers := testutil.GeneratePeers(2) - spm := New(1, &fakePeerTagger{}) - - if spm.HasPeer(peers[0]) { - t.Fatal("Expected not to have peer yet") - } - - spm.AddPeer(peers[0]) - if !spm.HasPeer(peers[0]) { - t.Fatal("Expected to have peer") - } - - spm.AddPeer(peers[1]) - if !spm.HasPeer(peers[1]) { - t.Fatal("Expected to have peer") - } - - spm.RemovePeer(peers[0]) - if spm.HasPeer(peers[0]) { - t.Fatal("Expected not to have peer") - } - - if !spm.HasPeer(peers[1]) { - t.Fatal("Expected to have peer") - } -} - -func TestPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) - spm := New(1, &fakePeerTagger{}) - - if len(spm.Peers()) > 0 { - t.Fatal("Expected not to have peers yet") - } - - spm.AddPeer(peers[0]) - if len(spm.Peers()) != 1 { - t.Fatal("Expected to have one peer") - } - - spm.AddPeer(peers[1]) - if len(spm.Peers()) != 2 { - t.Fatal("Expected to have two peers") - } - - spm.RemovePeer(peers[0]) - if len(spm.Peers()) != 1 { - t.Fatal("Expected to have one peer") - } -} - -func TestPeersDiscovered(t *testing.T) { - peers := testutil.GeneratePeers(2) - spm := New(1, &fakePeerTagger{}) - - if spm.PeersDiscovered() { - t.Fatal("Expected not to have discovered peers yet") - } - - spm.AddPeer(peers[0]) - if !spm.PeersDiscovered() { - t.Fatal("Expected to have discovered peers") - } - - spm.RemovePeer(peers[0]) - if !spm.PeersDiscovered() { - t.Fatal("Expected to still have discovered peers") - } -} - -func TestPeerTagging(t *testing.T) { - peers := testutil.GeneratePeers(2) - fpt := &fakePeerTagger{} - spm := New(1, fpt) - - spm.AddPeer(peers[0]) - if len(fpt.taggedPeers) != 1 { - t.Fatal("Expected to have tagged one peer") - } - - spm.AddPeer(peers[0]) - if len(fpt.taggedPeers) != 1 { - t.Fatal("Expected to have tagged one peer") - } - - spm.AddPeer(peers[1]) - if len(fpt.taggedPeers) != 2 { - t.Fatal("Expected to have tagged two peers") - } - - spm.RemovePeer(peers[1]) - if len(fpt.taggedPeers) != 1 { - t.Fatal("Expected to have untagged peer") - } -} - -func TestProtectConnection(t *testing.T) { - peers := testutil.GeneratePeers(1) - peerA := peers[0] - fpt := newFakePeerTagger() - spm := New(1, fpt) - - // Should not protect connection if peer hasn't been added yet - spm.ProtectConnection(peerA) - if fpt.isProtected(peerA) { - t.Fatal("Expected peer not to be protected") - } - - // Once peer is added, should be able to protect connection - spm.AddPeer(peerA) - spm.ProtectConnection(peerA) - if !fpt.isProtected(peerA) { - t.Fatal("Expected peer to be protected") - } - - // Removing peer should unprotect connection - spm.RemovePeer(peerA) - if fpt.isProtected(peerA) { - t.Fatal("Expected 
peer to be unprotected") - } -} - -func TestShutdown(t *testing.T) { - peers := testutil.GeneratePeers(2) - fpt := newFakePeerTagger() - spm := New(1, fpt) - - spm.AddPeer(peers[0]) - spm.AddPeer(peers[1]) - if len(fpt.taggedPeers) != 2 { - t.Fatal("Expected to have tagged two peers") - } - - spm.ProtectConnection(peers[0]) - if !fpt.isProtected(peers[0]) { - t.Fatal("Expected peer to be protected") - } - - spm.Shutdown() - - if len(fpt.taggedPeers) != 0 { - t.Fatal("Expected to have untagged all peers") - } - if len(fpt.protectedPeers) != 0 { - t.Fatal("Expected to have unprotected all peers") - } -} diff --git a/client/internal/tracing.go b/client/internal/tracing.go deleted file mode 100644 index aa1f7992..00000000 --- a/client/internal/tracing.go +++ /dev/null @@ -1,13 +0,0 @@ -package internal - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) -} diff --git a/client/stat.go b/client/stat.go index 013afec6..df8f6298 100644 --- a/client/stat.go +++ b/client/stat.go @@ -1,30 +1,9 @@ package client import ( - cid "github.com/ipfs/go-cid" + libipfs "github.com/ipfs/go-libipfs/bitswap/client" ) // Stat is a struct that provides various statistics on bitswap operations -type Stat struct { - Wantlist []cid.Cid - BlocksReceived uint64 - DataReceived uint64 - DupBlksReceived uint64 - DupDataReceived uint64 - MessagesReceived uint64 -} - -// Stat returns aggregated statistics about bitswap operations -func (bs *Client) Stat() (st Stat, err error) { - bs.counterLk.Lock() - c := bs.counters - st.BlocksReceived = c.blocksRecvd - st.DupBlksReceived = c.dupBlocksRecvd - st.DupDataReceived = c.dupDataRecvd - st.DataReceived = c.dataRecvd - st.MessagesReceived = c.messagesRecvd - bs.counterLk.Unlock() - st.Wantlist = bs.GetWantlist() - - return st, nil -} +// Deprecated: use github.com/ipfs/go-libipfs/client.Stat instead +type Stat = libipfs.Stat diff --git a/client/wantlist/wantlist.go b/client/wantlist/wantlist.go index da54983e..ed314020 100644 --- a/client/wantlist/wantlist.go +++ b/client/wantlist/wantlist.go @@ -3,140 +3,26 @@ package wantlist import ( - "sort" - - pb "github.com/ipfs/go-bitswap/message/pb" - cid "github.com/ipfs/go-cid" + libipfs "github.com/ipfs/go-libipfs/bitswap/client/wantlist" ) // Wantlist is a raw list of wanted blocks and their priorities -type Wantlist struct { - set map[cid.Cid]Entry - - // Re-computing this can get expensive so we memoize it. - cached []Entry -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client/wantlist.Wantlist instead +type Wantlist = libipfs.Wantlist // Entry is an entry in a want list, consisting of a cid and its priority -type Entry struct { - Cid cid.Cid - Priority int32 - WantType pb.Message_Wantlist_WantType -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client/wantlist.Entry instead +type Entry = libipfs.Entry // NewRefEntry creates a new reference tracked wantlist entry. 
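The wantlist code that follows encodes one precedence rule in two places: Add never downgrades an existing want-block to a want-have, and RemoveType with want-have never removes a want-block. A self-contained sketch of that rule (local want-type constants stand in for the protobuf enum; not the package's actual code):

package main

import "fmt"

// Local stand-ins for pb.Message_Wantlist_Have / _Block, for illustration only.
type wantType int

const (
	wantHave wantType = iota
	wantBlock
)

type entrySketch struct{ typ wantType }

type wantlistSketch struct{ set map[string]entrySketch }

func (w *wantlistSketch) add(c string, t wantType) bool {
	if e, ok := w.set[c]; ok && (e.typ == wantBlock || t == wantHave) {
		return false // never downgrade a want-block, never re-add a weaker want
	}
	w.set[c] = entrySketch{typ: t}
	return true
}

func (w *wantlistSketch) removeType(c string, t wantType) bool {
	e, ok := w.set[c]
	if !ok || (e.typ == wantBlock && t == wantHave) {
		return false // a want-have cancel does not touch a want-block
	}
	delete(w.set, c)
	return true
}

func main() {
	w := &wantlistSketch{set: map[string]entrySketch{}}
	w.add("cid-a", wantHave)
	w.add("cid-a", wantBlock)                     // upgrading to want-block is allowed
	fmt.Println(w.removeType("cid-a", wantHave))  // false: the want-block survives
	fmt.Println(w.removeType("cid-a", wantBlock)) // true: removed
}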
+// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client/wantlist.NewRefEntry instead func NewRefEntry(c cid.Cid, p int32) Entry { - return Entry{ - Cid: c, - Priority: p, - WantType: pb.Message_Wantlist_Block, - } + return libipfs.NewRefEntry(c, p) } -type entrySlice []Entry - -func (es entrySlice) Len() int { return len(es) } -func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } -func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } - // New generates a new raw Wantlist +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/client/wantlist.New instead func New() *Wantlist { - return &Wantlist{ - set: make(map[cid.Cid]Entry), - } -} - -// Len returns the number of entries in a wantlist. -func (w *Wantlist) Len() int { - return len(w.set) -} - -// Add adds an entry in a wantlist from CID & Priority, if not already present. -func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) bool { - e, ok := w.set[c] - - // Adding want-have should not override want-block - if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) { - return false - } - - w.put(c, Entry{ - Cid: c, - Priority: priority, - WantType: wantType, - }) - - return true -} - -// Remove removes the given cid from the wantlist. -func (w *Wantlist) Remove(c cid.Cid) bool { - _, ok := w.set[c] - if !ok { - return false - } - - w.delete(c) - return true -} - -// Remove removes the given cid from the wantlist, respecting the type: -// Remove with want-have will not remove an existing want-block. -func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool { - e, ok := w.set[c] - if !ok { - return false - } - - // Removing want-have should not remove want-block - if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have { - return false - } - - w.delete(c) - return true -} - -func (w *Wantlist) delete(c cid.Cid) { - delete(w.set, c) - w.cached = nil -} - -func (w *Wantlist) put(c cid.Cid, e Entry) { - w.cached = nil - w.set[c] = e -} - -// Contains returns the entry, if present, for the given CID, plus whether it -// was present. -func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { - e, ok := w.set[c] - return e, ok -} - -// Entries returns all wantlist entries for a want list, sorted by priority. -// -// DO NOT MODIFY. The returned list is cached. -func (w *Wantlist) Entries() []Entry { - if w.cached != nil { - return w.cached - } - es := make([]Entry, 0, len(w.set)) - for _, e := range w.set { - es = append(es, e) - } - sort.Sort(entrySlice(es)) - w.cached = es - return es[0:len(es):len(es)] -} - -// Absorb all the entries in other into this want list -func (w *Wantlist) Absorb(other *Wantlist) { - // Invalidate the cache up-front to avoid doing any work trying to keep it up-to-date. 
- w.cached = nil - - for _, e := range other.Entries() { - w.Add(e.Cid, e.Priority, e.WantType) - } + return libipfs.New() } diff --git a/client/wantlist/wantlist_test.go b/client/wantlist/wantlist_test.go deleted file mode 100644 index 2f64f385..00000000 --- a/client/wantlist/wantlist_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package wantlist - -import ( - "testing" - - pb "github.com/ipfs/go-bitswap/message/pb" - cid "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" -) - -var testcids []cid.Cid - -func init() { - strs := []string{ - "QmQL8LqkEgYXaDHdNYCG2mmpow7Sp8Z8Kt3QS688vyBeC7", - "QmcBDsdjgSXU7BP4A4V8LJCXENE5xVwnhrhRGVTJr9YCVj", - "QmQakgd2wDxc3uUF4orGdEm28zUT9Mmimp5pyPG2SFS9Gj", - } - for _, s := range strs { - c, err := cid.Decode(s) - if err != nil { - panic(err) - } - testcids = append(testcids, c) - } - -} - -type wli interface { - Contains(cid.Cid) (Entry, bool) -} - -func assertHasCid(t *testing.T, w wli, c cid.Cid) { - e, ok := w.Contains(c) - if !ok { - t.Fatal("expected to have ", c) - } - if !e.Cid.Equals(c) { - t.Fatal("returned entry had wrong cid value") - } -} - -func TestBasicWantlist(t *testing.T) { - wl := New() - - if !wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) { - t.Fatal("expected true") - } - assertHasCid(t, wl, testcids[0]) - if !wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { - t.Fatal("expected true") - } - assertHasCid(t, wl, testcids[0]) - assertHasCid(t, wl, testcids[1]) - - if wl.Len() != 2 { - t.Fatal("should have had two items") - } - - if wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { - t.Fatal("add shouldnt report success on second add") - } - assertHasCid(t, wl, testcids[0]) - assertHasCid(t, wl, testcids[1]) - - if wl.Len() != 2 { - t.Fatal("should have had two items") - } - - if !wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) { - t.Fatal("should have gotten true") - } - - assertHasCid(t, wl, testcids[1]) - if _, has := wl.Contains(testcids[0]); has { - t.Fatal("shouldnt have this cid") - } -} - -func TestAddHaveThenBlock(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) - wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) - - e, ok := wl.Contains(testcids[0]) - if !ok { - t.Fatal("expected to have ", testcids[0]) - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected to be ", pb.Message_Wantlist_Block) - } -} - -func TestAddBlockThenHave(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) - wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) - - e, ok := wl.Contains(testcids[0]) - if !ok { - t.Fatal("expected to have ", testcids[0]) - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected to be ", pb.Message_Wantlist_Block) - } -} - -func TestAddHaveThenRemoveBlock(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) - wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) - - _, ok := wl.Contains(testcids[0]) - if ok { - t.Fatal("expected not to have ", testcids[0]) - } -} - -func TestAddBlockThenRemoveHave(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) - wl.RemoveType(testcids[0], pb.Message_Wantlist_Have) - - e, ok := wl.Contains(testcids[0]) - if !ok { - t.Fatal("expected to have ", testcids[0]) - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected to be ", pb.Message_Wantlist_Block) - } -} - -func TestAddHaveThenRemoveAny(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) - wl.Remove(testcids[0]) - - _, 
ok := wl.Contains(testcids[0]) - if ok { - t.Fatal("expected not to have ", testcids[0]) - } -} - -func TestAddBlockThenRemoveAny(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) - wl.Remove(testcids[0]) - - _, ok := wl.Contains(testcids[0]) - if ok { - t.Fatal("expected not to have ", testcids[0]) - } -} - -func TestAbsort(t *testing.T) { - wl := New() - wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) - wl.Add(testcids[1], 4, pb.Message_Wantlist_Have) - wl.Add(testcids[2], 3, pb.Message_Wantlist_Have) - - wl2 := New() - wl2.Add(testcids[0], 2, pb.Message_Wantlist_Have) - wl2.Add(testcids[1], 1, pb.Message_Wantlist_Block) - - wl.Absorb(wl2) - - e, ok := wl.Contains(testcids[0]) - if !ok { - t.Fatal("expected to have ", testcids[0]) - } - if e.Priority != 5 { - t.Fatal("expected priority 5") - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected type ", pb.Message_Wantlist_Block) - } - - e, ok = wl.Contains(testcids[1]) - if !ok { - t.Fatal("expected to have ", testcids[1]) - } - if e.Priority != 1 { - t.Fatal("expected priority 1") - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected type ", pb.Message_Wantlist_Block) - } - - e, ok = wl.Contains(testcids[2]) - if !ok { - t.Fatal("expected to have ", testcids[2]) - } - if e.Priority != 3 { - t.Fatal("expected priority 3") - } - if e.WantType != pb.Message_Wantlist_Have { - t.Fatal("expected type ", pb.Message_Wantlist_Have) - } -} - -func TestSortEntries(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) - wl.Add(testcids[1], 5, pb.Message_Wantlist_Have) - wl.Add(testcids[2], 4, pb.Message_Wantlist_Have) - - entries := wl.Entries() - if !entries[0].Cid.Equals(testcids[1]) || - !entries[1].Cid.Equals(testcids[2]) || - !entries[2].Cid.Equals(testcids[0]) { - t.Fatal("wrong order") - } - -} - -// Test adding and removing interleaved with checking entries to make sure we clear the cache. 
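TestCache below pins down the memoization contract of the original Entries(): the sorted slice is cached, and every Add or Remove must drop that cache so the next call recomputes it. A minimal sketch of the same invalidation pattern (simplified to plain strings with no priorities):

package main

import (
	"fmt"
	"sort"
)

type cachedListSketch struct {
	set    map[string]struct{}
	cached []string // invalidated (set to nil) on every mutation
}

func (c *cachedListSketch) add(k string) {
	c.cached = nil
	c.set[k] = struct{}{}
}

func (c *cachedListSketch) remove(k string) {
	c.cached = nil
	delete(c.set, k)
}

// entries recomputes and memoizes the sorted view; callers must not modify it.
func (c *cachedListSketch) entries() []string {
	if c.cached != nil {
		return c.cached
	}
	out := make([]string, 0, len(c.set))
	for k := range c.set {
		out = append(out, k)
	}
	sort.Strings(out)
	c.cached = out
	return out
}

func main() {
	c := &cachedListSketch{set: map[string]struct{}{}}
	c.add("b")
	c.add("a")
	fmt.Println(c.entries()) // [a b]
	c.remove("a")
	fmt.Println(c.entries()) // [b] -- the cache was dropped by remove
}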
-func TestCache(t *testing.T) { - wl := New() - - wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) - require.Len(t, wl.Entries(), 1) - - wl.Add(testcids[1], 3, pb.Message_Wantlist_Block) - require.Len(t, wl.Entries(), 2) - - wl.Remove(testcids[1]) - require.Len(t, wl.Entries(), 1) -} diff --git a/decision/forward.go b/decision/forward.go index d19cda94..c27fab83 100644 --- a/decision/forward.go +++ b/decision/forward.go @@ -1,12 +1,14 @@ package decision -import "github.com/ipfs/go-bitswap/server" +import ( + libipfs "github.com/ipfs/go-libipfs/bitswap/decision" +) type ( - // DEPRECATED use server.Receipt instead - Receipt = server.Receipt - // DEPRECATED use server.ScoreLedger instead - ScoreLedger = server.ScoreLedger - // DEPRECATED use server.ScorePeerFunc instead - ScorePeerFunc = server.ScorePeerFunc + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/decision.Receipt instead + Receipt = libipfs.Receipt + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/decision.ScoreLedger instead + ScoreLedger = libipfs.ScoreLedger + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/decision.ScorePeerFunc instead + ScorePeerFunc = libipfs.ScorePeerFunc ) diff --git a/forward.go b/forward.go index 2beb7590..ff73c8b7 100644 --- a/forward.go +++ b/forward.go @@ -1,17 +1,16 @@ package bitswap import ( - "github.com/ipfs/go-bitswap/server" - "github.com/ipfs/go-bitswap/tracer" + libipfs "github.com/ipfs/go-libipfs/bitswap" ) type ( - // DEPRECATED - PeerBlockRequestFilter = server.PeerBlockRequestFilter - // DEPRECATED - TaskComparator = server.TaskComparator - // DEPRECATED - TaskInfo = server.TaskInfo - // DEPRECATED - Tracer = tracer.Tracer + // Deprecated: use github.com/ipfs/go-libipfs/bitswap.PeerBlockRequestFilter instead + PeerBlockRequestFilter = libipfs.PeerBlockRequestFilter + // Deprecated: use github.com/ipfs/go-libipfs/bitswap.TaskComparator instead + TaskComparator = libipfs.TaskComparator + // Deprecated: use github.com/ipfs/go-libipfs/bitswap.TaskInfo instead + TaskInfo = libipfs.TaskInfo + // Deprecated: use github.com/ipfs/go-libipfs/bitswap.Tracer instead + Tracer = libipfs.Tracer ) diff --git a/go.mod b/go.mod index 65779596..4a34de56 100644 --- a/go.mod +++ b/go.mod @@ -1,54 +1,45 @@ module github.com/ipfs/go-bitswap require ( - github.com/benbjohnson/clock v1.3.0 - github.com/cskr/pubsub v1.0.2 github.com/gogo/protobuf v1.3.2 - github.com/google/uuid v1.3.0 - github.com/ipfs/go-block-format v0.0.3 github.com/ipfs/go-cid v0.3.2 - github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-detect-race v0.0.1 github.com/ipfs/go-ipfs-blockstore v1.2.0 - github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-delay v0.0.1 - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 github.com/ipfs/go-ipfs-routing v0.3.0 - github.com/ipfs/go-ipfs-util v0.0.2 - github.com/ipfs/go-ipld-format v0.3.0 - github.com/ipfs/go-log v1.0.5 + github.com/ipfs/go-libipfs v0.4.0 github.com/ipfs/go-metrics-interface v0.0.1 - github.com/ipfs/go-peertaskqueue v0.8.0 - github.com/jbenet/goprocess v0.1.4 - github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.22.0 + github.com/libp2p/go-libp2p v0.23.4 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.2.0 - github.com/multiformats/go-multiaddr v0.7.0 - github.com/multiformats/go-multistream v0.3.3 - github.com/stretchr/testify v1.8.0 - go.opentelemetry.io/otel v1.7.0 - go.opentelemetry.io/otel/trace v1.7.0 - go.uber.org/multierr v1.8.0 - go.uber.org/zap v1.22.0 ) require ( - github.com/davecgh/go-spew 
v1.1.1 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/cskr/pubsub v1.0.2 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/go-cmp v0.5.8 // indirect github.com/google/gopacket v1.1.19 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/huin/goupnp v1.0.3 // indirect github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-block-format v0.1.1 // indirect + github.com/ipfs/go-datastore v0.6.0 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect + github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect github.com/ipfs/go-ipfs-pq v0.0.2 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-format v0.3.0 // indirect + github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/ipfs/go-peertaskqueue v0.8.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/klauspost/cpuid/v2 v2.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/cpuid/v2 v2.1.1 // indirect github.com/koron/go-ssdp v0.0.3 // indirect + github.com/kr/pretty v0.2.1 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect @@ -60,27 +51,30 @@ require ( github.com/miekg/dns v1.1.50 // indirect github.com/minio/sha256-simd v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.0.4 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.1.0 // indirect + github.com/multiformats/go-multiaddr v0.8.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.5.0 // indirect + github.com/multiformats/go-multicodec v0.6.0 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-multistream v0.3.3 // indirect github.com/multiformats/go-varint v0.0.6 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.23.0 // indirect golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect + golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect golang.org/x/tools v0.1.12 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect ) diff --git a/go.sum b/go.sum index 271234f5..b56b9450 100644 --- a/go.sum +++ b/go.sum @@ -5,9 +5,8 @@ 
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -17,7 +16,7 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= @@ -34,9 +33,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= @@ -56,8 +54,9 @@ github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3 github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-block-format v0.1.1 h1:129vSO3zwbsYADcyQWcOYiuCpAqt462SFfqFHdFJhhI= +github.com/ipfs/go-block-format v0.1.1/go.mod h1:+McEIT+g52p+zz5xGAABGSOKrzmrdX97bc0USBdWPUs= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod 
h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= @@ -72,7 +71,6 @@ github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46U github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= -github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= @@ -89,6 +87,8 @@ github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2PO github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ= github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-libipfs v0.4.0 h1:TkUxJGjtPnSzAgkw7VjS0/DBay3MPjmTBa4dGdUQCDE= +github.com/ipfs/go-libipfs v0.4.0/go.mod h1:XsU2cP9jBhDrXoJDe0WxikB8XcVmD3k2MEZvB3dbYu8= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= @@ -108,11 +108,11 @@ github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZl github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.10 h1:Ai8UzuomSCDw90e1qNMtb15msBXsNpH6gzkkENQNcJo= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= -github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= @@ -128,8 +128,8 @@ github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QT github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw= -github.com/libp2p/go-libp2p 
v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p v0.23.4 h1:hWi9XHSOVFR1oDWRk7rigfyA4XNMuYL20INNybP9LP8= +github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= @@ -147,10 +147,8 @@ github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+O github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= -github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= -github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= -github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= +github.com/lucas-clemente/quic-go v0.29.1 h1:Z+WMJ++qMLhvpFkRZA+jl3BTxUjm415YBmWanXB8zP0= github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= @@ -178,14 +176,14 @@ github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= -github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.7.0 h1:gskHcdaCyPtp9XskVwtvEeQOG465sCohbQIirSyqxrc= -github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= +github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -194,8 
+192,8 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= -github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= @@ -219,7 +217,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= @@ -230,13 +228,11 @@ github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -258,8 +254,8 @@ go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95a go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap 
v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -268,6 +264,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b h1:SCE/18RnFsLrjydh/R/s5EVvHoZprqEQUuoxK8q2Pc4= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -288,8 +285,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5 h1:KafLifaRFIuSJ5C+7CyFJOF9haxKNC1CEIDk8GX6X0k= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -340,7 +337,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -348,7 +344,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= diff --git a/internal/defaults/defaults.go b/internal/defaults/defaults.go deleted file mode 100644 index 6f7c2e74..00000000 --- a/internal/defaults/defaults.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "time" -) - -const ( - // these requests take at _least_ two minutes at the moment. - ProvideTimeout = time.Minute * 3 - ProvSearchDelay = time.Second - - // Number of concurrent workers in decision engine that process requests to the blockstore - BitswapEngineBlockstoreWorkerCount = 128 - // the total number of simultaneous threads sending outgoing messages - BitswapTaskWorkerCount = 8 - // how many worker threads to start for decision engine task worker - BitswapEngineTaskWorkerCount = 8 - // the total amount of bytes that a peer should have outstanding, it is utilized by the decision engine - BitswapMaxOutstandingBytesPerPeer = 1 << 20 - // the number of bytes we attempt to make each outgoing bitswap message - BitswapEngineTargetMessageSize = 16 * 1024 - // HasBlockBufferSize is the buffer size of the channel for new blocks - // that need to be provided. They should get pulled over by the - // provideCollector even before they are actually provided. - // TODO: Does this need to be this large givent that? - HasBlockBufferSize = 256 -) diff --git a/internal/testutil/testutil.go b/internal/testutil/testutil.go deleted file mode 100644 index 355f9462..00000000 --- a/internal/testutil/testutil.go +++ /dev/null @@ -1,140 +0,0 @@ -package testutil - -import ( - "fmt" - "math/rand" - - "github.com/ipfs/go-bitswap/client/wantlist" - bsmsg "github.com/ipfs/go-bitswap/message" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var blockGenerator = blocksutil.NewBlockGenerator() -var prioritySeq int32 - -// GenerateBlocksOfSize generates a series of blocks of the given byte size -func GenerateBlocksOfSize(n int, size int64) []blocks.Block { - generatedBlocks := make([]blocks.Block, 0, n) - for i := 0; i < n; i++ { - // rand.Read never errors - buf := make([]byte, size) - rand.Read(buf) - b := blocks.NewBlock(buf) - generatedBlocks = append(generatedBlocks, b) - - } - return generatedBlocks -} - -// GenerateCids produces n content identifiers. -func GenerateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -// GenerateMessageEntries makes fake bitswap message entries. 
-func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { - bsmsgs := make([]bsmsg.Entry, 0, n) - for i := 0; i < n; i++ { - prioritySeq++ - msg := bsmsg.Entry{ - Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), - Cancel: isCancel, - } - bsmsgs = append(bsmsgs, msg) - } - return bsmsgs -} - -var peerSeq int - -// GeneratePeers creates n peer ids. -func GeneratePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - peerSeq++ - p := peer.ID(fmt.Sprint(i)) - peerIds = append(peerIds, p) - } - return peerIds -} - -var nextSession uint64 - -// GenerateSessionID make a unit session identifier. -func GenerateSessionID() uint64 { - nextSession++ - return uint64(nextSession) -} - -// ContainsPeer returns true if a peer is found n a list of peers. -func ContainsPeer(peers []peer.ID, p peer.ID) bool { - for _, n := range peers { - if p == n { - return true - } - } - return false -} - -// IndexOf returns the index of a given cid in an array of blocks -func IndexOf(blks []blocks.Block, c cid.Cid) int { - for i, n := range blks { - if n.Cid() == c { - return i - } - } - return -1 -} - -// ContainsBlock returns true if a block is found n a list of blocks -func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { - return IndexOf(blks, block.Cid()) != -1 -} - -// ContainsKey returns true if a key is found n a list of CIDs. -func ContainsKey(ks []cid.Cid, c cid.Cid) bool { - for _, k := range ks { - if c == k { - return true - } - } - return false -} - -// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if -// they're in a different order) -func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool { - if len(ks1) != len(ks2) { - return false - } - - for _, k := range ks1 { - if !ContainsKey(ks2, k) { - return false - } - } - return true -} - -// MatchPeersIgnoreOrder returns true if the lists of peers match (even if -// they're in a different order) -func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool { - if len(ps1) != len(ps2) { - return false - } - - for _, p := range ps1 { - if !ContainsPeer(ps2, p) { - return false - } - } - return true -} diff --git a/internal/testutil/testutil_test.go b/internal/testutil/testutil_test.go deleted file mode 100644 index c4dc1af1..00000000 --- a/internal/testutil/testutil_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package testutil - -import ( - "testing" - - blocks "github.com/ipfs/go-block-format" -) - -func TestGenerateBlocksOfSize(t *testing.T) { - for _, b1 := range GenerateBlocksOfSize(10, 100) { - b2 := blocks.NewBlock(b1.RawData()) - if b2.Cid() != b1.Cid() { - t.Fatal("block CIDs mismatch") - } - } -} diff --git a/internal/tracing.go b/internal/tracing.go deleted file mode 100644 index aa1f7992..00000000 --- a/internal/tracing.go +++ /dev/null @@ -1,13 +0,0 @@ -package internal - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) 
-} diff --git a/message/message.go b/message/message.go index b9c7a46b..01b2b0fd 100644 --- a/message/message.go +++ b/message/message.go @@ -1,500 +1,57 @@ package message import ( - "encoding/binary" - "errors" "io" - "github.com/ipfs/go-bitswap/client/wantlist" - pb "github.com/ipfs/go-bitswap/message/pb" - - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - pool "github.com/libp2p/go-buffer-pool" msgio "github.com/libp2p/go-msgio" - u "github.com/ipfs/go-ipfs-util" - "github.com/libp2p/go-libp2p/core/network" + libipfs "github.com/ipfs/go-libipfs/bitswap/message" ) // BitSwapMessage is the basic interface for interacting building, encoding, // and decoding messages sent on the BitSwap protocol. -type BitSwapMessage interface { - // Wantlist returns a slice of unique keys that represent data wanted by - // the sender. - Wantlist() []Entry - - // Blocks returns a slice of unique blocks. - Blocks() []blocks.Block - // BlockPresences returns the list of HAVE / DONT_HAVE in the message - BlockPresences() []BlockPresence - // Haves returns the Cids for each HAVE - Haves() []cid.Cid - // DontHaves returns the Cids for each DONT_HAVE - DontHaves() []cid.Cid - // PendingBytes returns the number of outstanding bytes of data that the - // engine has yet to send to the client (because they didn't fit in this - // message) - PendingBytes() int32 - - // AddEntry adds an entry to the Wantlist. - AddEntry(key cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int - - // Cancel adds a CANCEL for the given CID to the message - // Returns the size of the CANCEL entry in the protobuf - Cancel(key cid.Cid) int - - // Remove removes any entries for the given CID. Useful when the want - // status for the CID changes when preparing a message. - Remove(key cid.Cid) - - // Empty indicates whether the message has any information - Empty() bool - // Size returns the size of the message in bytes - Size() int - - // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set - Full() bool - - // AddBlock adds a block to the message - AddBlock(blocks.Block) - // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message - AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) - // AddHave adds a HAVE for the given Cid to the message - AddHave(cid.Cid) - // AddDontHave adds a DONT_HAVE for the given Cid to the message - AddDontHave(cid.Cid) - // SetPendingBytes sets the number of bytes of data that are yet to be sent - // to the client (because they didn't fit in this message) - SetPendingBytes(int32) - Exportable - - Loggable() map[string]interface{} - - // Reset the values in the message back to defaults, so it can be reused - Reset(bool) - - // Clone the message fields - Clone() BitSwapMessage -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.BitSwapMessage instead +type BitSwapMessage = libipfs.BitSwapMessage // Exportable is an interface for structures than can be // encoded in a bitswap protobuf. -type Exportable interface { - // Note that older Bitswap versions use a different wire format, so we need - // to convert the message to the appropriate format depending on which - // version of the protocol the remote peer supports. 
- ToProtoV0() *pb.Message - ToProtoV1() *pb.Message - ToNetV0(w io.Writer) error - ToNetV1(w io.Writer) error -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.Exportable instead +type Exportable = libipfs.Exportable // BlockPresence represents a HAVE / DONT_HAVE for a given Cid -type BlockPresence struct { - Cid cid.Cid - Type pb.Message_BlockPresenceType -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.BlockPresence instead +type BlockPresence = libipfs.BlockPresence // Entry is a wantlist entry in a Bitswap message, with flags indicating // - whether message is a cancel // - whether requester wants a DONT_HAVE message // - whether requester wants a HAVE message (instead of the block) -type Entry struct { - wantlist.Entry - Cancel bool - SendDontHave bool -} - -// Get the size of the entry on the wire -func (e *Entry) Size() int { - epb := e.ToPB() - return epb.Size() -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.Entry instead +type Entry = libipfs.Entry -// Get the entry in protobuf form -func (e *Entry) ToPB() pb.Message_Wantlist_Entry { - return pb.Message_Wantlist_Entry{ - Block: pb.Cid{Cid: e.Cid}, - Priority: int32(e.Priority), - Cancel: e.Cancel, - WantType: e.WantType, - SendDontHave: e.SendDontHave, - } -} - -var MaxEntrySize = maxEntrySize() - -func maxEntrySize() int { - var maxInt32 int32 = (1 << 31) - 1 - - c := cid.NewCidV0(u.Hash([]byte("cid"))) - e := Entry{ - Entry: wantlist.Entry{ - Cid: c, - Priority: maxInt32, - WantType: pb.Message_Wantlist_Have, - }, - SendDontHave: true, // true takes up more space than false - Cancel: true, - } - return e.Size() -} - -type impl struct { - full bool - wantlist map[cid.Cid]*Entry - blocks map[cid.Cid]blocks.Block - blockPresences map[cid.Cid]pb.Message_BlockPresenceType - pendingBytes int32 -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.MaxEntrySize instead +var MaxEntrySize = libipfs.MaxEntrySize // New returns a new, empty bitswap message +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.New instead func New(full bool) BitSwapMessage { - return newMsg(full) -} - -func newMsg(full bool) *impl { - return &impl{ - full: full, - wantlist: make(map[cid.Cid]*Entry), - blocks: make(map[cid.Cid]blocks.Block), - blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), - } -} - -// Clone the message fields -func (m *impl) Clone() BitSwapMessage { - msg := newMsg(m.full) - for k := range m.wantlist { - msg.wantlist[k] = m.wantlist[k] - } - for k := range m.blocks { - msg.blocks[k] = m.blocks[k] - } - for k := range m.blockPresences { - msg.blockPresences[k] = m.blockPresences[k] - } - msg.pendingBytes = m.pendingBytes - return msg -} - -// Reset the values in the message back to defaults, so it can be reused -func (m *impl) Reset(full bool) { - m.full = full - for k := range m.wantlist { - delete(m.wantlist, k) - } - for k := range m.blocks { - delete(m.blocks, k) - } - for k := range m.blockPresences { - delete(m.blockPresences, k) - } - m.pendingBytes = 0 -} - -var errCidMissing = errors.New("missing cid") - -func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { - m := newMsg(pbm.Wantlist.Full) - for _, e := range pbm.Wantlist.Entries { - if !e.Block.Cid.Defined() { - return nil, errCidMissing - } - m.addEntry(e.Block.Cid, e.Priority, e.Cancel, e.WantType, e.SendDontHave) - } - - // deprecated - for _, d := range pbm.Blocks { - // CIDv0, sha256, protobuf only - b := blocks.NewBlock(d) - m.AddBlock(b) - } - // - - for _, b := range 
pbm.GetPayload() { - pref, err := cid.PrefixFromBytes(b.GetPrefix()) - if err != nil { - return nil, err - } - - c, err := pref.Sum(b.GetData()) - if err != nil { - return nil, err - } - - blk, err := blocks.NewBlockWithCid(b.GetData(), c) - if err != nil { - return nil, err - } - - m.AddBlock(blk) - } - - for _, bi := range pbm.GetBlockPresences() { - if !bi.Cid.Cid.Defined() { - return nil, errCidMissing - } - m.AddBlockPresence(bi.Cid.Cid, bi.Type) - } - - m.pendingBytes = pbm.PendingBytes - - return m, nil -} - -func (m *impl) Full() bool { - return m.full -} - -func (m *impl) Empty() bool { - return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 -} - -func (m *impl) Wantlist() []Entry { - out := make([]Entry, 0, len(m.wantlist)) - for _, e := range m.wantlist { - out = append(out, *e) - } - return out -} - -func (m *impl) Blocks() []blocks.Block { - bs := make([]blocks.Block, 0, len(m.blocks)) - for _, block := range m.blocks { - bs = append(bs, block) - } - return bs -} - -func (m *impl) BlockPresences() []BlockPresence { - bps := make([]BlockPresence, 0, len(m.blockPresences)) - for c, t := range m.blockPresences { - bps = append(bps, BlockPresence{c, t}) - } - return bps -} - -func (m *impl) Haves() []cid.Cid { - return m.getBlockPresenceByType(pb.Message_Have) -} - -func (m *impl) DontHaves() []cid.Cid { - return m.getBlockPresenceByType(pb.Message_DontHave) -} - -func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { - cids := make([]cid.Cid, 0, len(m.blockPresences)) - for c, bpt := range m.blockPresences { - if bpt == t { - cids = append(cids, c) - } - } - return cids -} - -func (m *impl) PendingBytes() int32 { - return m.pendingBytes -} - -func (m *impl) SetPendingBytes(pendingBytes int32) { - m.pendingBytes = pendingBytes -} - -func (m *impl) Remove(k cid.Cid) { - delete(m.wantlist, k) -} - -func (m *impl) Cancel(k cid.Cid) int { - return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) -} - -func (m *impl) AddEntry(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { - return m.addEntry(k, priority, false, wantType, sendDontHave) -} - -func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { - e, exists := m.wantlist[c] - if exists { - // Only change priority if want is of the same type - if e.WantType == wantType { - e.Priority = priority - } - // Only change from "dont cancel" to "do cancel" - if cancel { - e.Cancel = cancel - } - // Only change from "dont send" to "do send" DONT_HAVE - if sendDontHave { - e.SendDontHave = sendDontHave - } - // want-block overrides existing want-have - if wantType == pb.Message_Wantlist_Block && e.WantType == pb.Message_Wantlist_Have { - e.WantType = wantType - } - m.wantlist[c] = e - return 0 - } - - e = &Entry{ - Entry: wantlist.Entry{ - Cid: c, - Priority: priority, - WantType: wantType, - }, - SendDontHave: sendDontHave, - Cancel: cancel, - } - m.wantlist[c] = e - - return e.Size() -} - -func (m *impl) AddBlock(b blocks.Block) { - delete(m.blockPresences, b.Cid()) - m.blocks[b.Cid()] = b -} - -func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) { - if _, ok := m.blocks[c]; ok { - return - } - m.blockPresences[c] = t -} - -func (m *impl) AddHave(c cid.Cid) { - m.AddBlockPresence(c, pb.Message_Have) -} - -func (m *impl) AddDontHave(c cid.Cid) { - m.AddBlockPresence(c, pb.Message_DontHave) -} - -func (m *impl) Size() int { - size := 0 - for _, block 
:= range m.blocks { - size += len(block.RawData()) - } - for c := range m.blockPresences { - size += BlockPresenceSize(c) - } - for _, e := range m.wantlist { - size += e.Size() - } - - return size + return libipfs.New(full) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.BlockPresenceSize instead func BlockPresenceSize(c cid.Cid) int { - return (&pb.Message_BlockPresence{ - Cid: pb.Cid{Cid: c}, - Type: pb.Message_Have, - }).Size() + return libipfs.BlockPresenceSize(c) } // FromNet generates a new BitswapMessage from incoming data on an io.Reader. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.FromNet instead func FromNet(r io.Reader) (BitSwapMessage, error) { - reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) - return FromMsgReader(reader) + return libipfs.FromNet(r) } // FromPBReader generates a new Bitswap message from a gogo-protobuf reader +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message.FromMsgReader instead func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { - msg, err := r.ReadMsg() - if err != nil { - return nil, err - } - - var pb pb.Message - err = pb.Unmarshal(msg) - r.ReleaseMsg(msg) - if err != nil { - return nil, err - } - - return newMessageFromProto(pb) -} - -func (m *impl) ToProtoV0() *pb.Message { - pbm := new(pb.Message) - pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) - for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) - } - pbm.Wantlist.Full = m.full - - blocks := m.Blocks() - pbm.Blocks = make([][]byte, 0, len(blocks)) - for _, b := range blocks { - pbm.Blocks = append(pbm.Blocks, b.RawData()) - } - return pbm -} - -func (m *impl) ToProtoV1() *pb.Message { - pbm := new(pb.Message) - pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) - for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) - } - pbm.Wantlist.Full = m.full - - blocks := m.Blocks() - pbm.Payload = make([]pb.Message_Block, 0, len(blocks)) - for _, b := range blocks { - pbm.Payload = append(pbm.Payload, pb.Message_Block{ - Data: b.RawData(), - Prefix: b.Cid().Prefix().Bytes(), - }) - } - - pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) - for c, t := range m.blockPresences { - pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ - Cid: pb.Cid{Cid: c}, - Type: t, - }) - } - - pbm.PendingBytes = m.PendingBytes() - - return pbm -} - -func (m *impl) ToNetV0(w io.Writer) error { - return write(w, m.ToProtoV0()) -} - -func (m *impl) ToNetV1(w io.Writer) error { - return write(w, m.ToProtoV1()) -} - -func write(w io.Writer, m *pb.Message) error { - size := m.Size() - - buf := pool.Get(size + binary.MaxVarintLen64) - defer pool.Put(buf) - - n := binary.PutUvarint(buf, uint64(size)) - - written, err := m.MarshalTo(buf[n:]) - if err != nil { - return err - } - n += written - - _, err = w.Write(buf[:n]) - return err -} - -func (m *impl) Loggable() map[string]interface{} { - blocks := make([]string, 0, len(m.blocks)) - for _, v := range m.blocks { - blocks = append(blocks, v.Cid().String()) - } - return map[string]interface{}{ - "blocks": blocks, - "wants": m.Wantlist(), - } + return libipfs.FromMsgReader(r) } diff --git a/message/message_test.go b/message/message_test.go deleted file mode 100644 index 46de4961..00000000 --- a/message/message_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package message - -import ( - "bytes" - "testing" - - 
"github.com/ipfs/go-bitswap/client/wantlist" - pb "github.com/ipfs/go-bitswap/message/pb" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - u "github.com/ipfs/go-ipfs-util" -) - -func mkFakeCid(s string) cid.Cid { - return cid.NewCidV0(u.Hash([]byte(s))) -} - -func TestAppendWanted(t *testing.T) { - str := mkFakeCid("foo") - m := New(true) - m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) - - if !wantlistContains(&m.ToProtoV0().Wantlist, str) { - t.Fail() - } -} - -func TestNewMessageFromProto(t *testing.T) { - str := mkFakeCid("a_key") - protoMessage := new(pb.Message) - protoMessage.Wantlist.Entries = []pb.Message_Wantlist_Entry{ - {Block: pb.Cid{Cid: str}}, - } - if !wantlistContains(&protoMessage.Wantlist, str) { - t.Fail() - } - m, err := newMessageFromProto(*protoMessage) - if err != nil { - t.Fatal(err) - } - - if !wantlistContains(&m.ToProtoV0().Wantlist, str) { - t.Fail() - } -} - -func TestAppendBlock(t *testing.T) { - - strs := make([]string, 2) - strs = append(strs, "Celeritas") - strs = append(strs, "Incendia") - - m := New(true) - for _, str := range strs { - block := blocks.NewBlock([]byte(str)) - m.AddBlock(block) - } - - // assert strings are in proto message - for _, blockbytes := range m.ToProtoV0().GetBlocks() { - s := bytes.NewBuffer(blockbytes).String() - if !contains(strs, s) { - t.Fail() - } - } -} - -func TestWantlist(t *testing.T) { - keystrs := []cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} - m := New(true) - for _, s := range keystrs { - m.AddEntry(s, 1, pb.Message_Wantlist_Block, true) - } - exported := m.Wantlist() - - for _, k := range exported { - present := false - for _, s := range keystrs { - - if s.Equals(k.Cid) { - present = true - } - } - if !present { - t.Logf("%v isn't in original list", k.Cid) - t.Fail() - } - } -} - -func TestCopyProtoByValue(t *testing.T) { - str := mkFakeCid("foo") - m := New(true) - protoBeforeAppend := m.ToProtoV0() - m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) - if wantlistContains(&protoBeforeAppend.Wantlist, str) { - t.Fail() - } -} - -func TestToNetFromNetPreservesWantList(t *testing.T) { - original := New(true) - original.AddEntry(mkFakeCid("M"), 1, pb.Message_Wantlist_Block, true) - original.AddEntry(mkFakeCid("B"), 1, pb.Message_Wantlist_Block, true) - original.AddEntry(mkFakeCid("D"), 1, pb.Message_Wantlist_Block, true) - original.AddEntry(mkFakeCid("T"), 1, pb.Message_Wantlist_Block, true) - original.AddEntry(mkFakeCid("F"), 1, pb.Message_Wantlist_Block, true) - - buf := new(bytes.Buffer) - if err := original.ToNetV1(buf); err != nil { - t.Fatal(err) - } - - copied, err := FromNet(buf) - if err != nil { - t.Fatal(err) - } - - if !copied.Full() { - t.Fatal("fullness attribute got dropped on marshal") - } - - keys := make(map[cid.Cid]bool) - for _, k := range copied.Wantlist() { - keys[k.Cid] = true - } - - for _, k := range original.Wantlist() { - if _, ok := keys[k.Cid]; !ok { - t.Fatalf("Key Missing: \"%v\"", k) - } - } -} - -func TestToAndFromNetMessage(t *testing.T) { - - original := New(true) - original.AddBlock(blocks.NewBlock([]byte("W"))) - original.AddBlock(blocks.NewBlock([]byte("E"))) - original.AddBlock(blocks.NewBlock([]byte("F"))) - original.AddBlock(blocks.NewBlock([]byte("M"))) - - buf := new(bytes.Buffer) - if err := original.ToNetV1(buf); err != nil { - t.Fatal(err) - } - - m2, err := FromNet(buf) - if err != nil { - t.Fatal(err) - } - - keys := make(map[cid.Cid]bool) - for _, b 
:= range m2.Blocks() { - keys[b.Cid()] = true - } - - for _, b := range original.Blocks() { - if _, ok := keys[b.Cid()]; !ok { - t.Fail() - } - } -} - -func wantlistContains(wantlist *pb.Message_Wantlist, c cid.Cid) bool { - for _, e := range wantlist.GetEntries() { - if e.Block.Cid.Defined() && c.Equals(e.Block.Cid) { - return true - } - } - return false -} - -func contains(strs []string, x string) bool { - for _, s := range strs { - if s == x { - return true - } - } - return false -} - -func TestDuplicates(t *testing.T) { - b := blocks.NewBlock([]byte("foo")) - msg := New(true) - - msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) - msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) - if len(msg.Wantlist()) != 1 { - t.Fatal("Duplicate in BitSwapMessage") - } - - msg.AddBlock(b) - msg.AddBlock(b) - if len(msg.Blocks()) != 1 { - t.Fatal("Duplicate in BitSwapMessage") - } - - b2 := blocks.NewBlock([]byte("bar")) - msg.AddBlockPresence(b2.Cid(), pb.Message_Have) - msg.AddBlockPresence(b2.Cid(), pb.Message_Have) - if len(msg.Haves()) != 1 { - t.Fatal("Duplicate in BitSwapMessage") - } -} - -func TestBlockPresences(t *testing.T) { - b1 := blocks.NewBlock([]byte("foo")) - b2 := blocks.NewBlock([]byte("bar")) - msg := New(true) - - msg.AddBlockPresence(b1.Cid(), pb.Message_Have) - msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) - if len(msg.Haves()) != 1 || !msg.Haves()[0].Equals(b1.Cid()) { - t.Fatal("Expected HAVE") - } - if len(msg.DontHaves()) != 1 || !msg.DontHaves()[0].Equals(b2.Cid()) { - t.Fatal("Expected HAVE") - } - - msg.AddBlock(b1) - if len(msg.Haves()) != 0 { - t.Fatal("Expected block to overwrite HAVE") - } - - msg.AddBlock(b2) - if len(msg.DontHaves()) != 0 { - t.Fatal("Expected block to overwrite DONT_HAVE") - } - - msg.AddBlockPresence(b1.Cid(), pb.Message_Have) - if len(msg.Haves()) != 0 { - t.Fatal("Expected HAVE not to overwrite block") - } - - msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) - if len(msg.DontHaves()) != 0 { - t.Fatal("Expected DONT_HAVE not to overwrite block") - } -} - -func TestAddWantlistEntry(t *testing.T) { - b := blocks.NewBlock([]byte("foo")) - msg := New(true) - - msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Have, false) - msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) - entries := msg.Wantlist() - if len(entries) != 1 { - t.Fatal("Duplicate in BitSwapMessage") - } - e := entries[0] - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("want-block should override want-have") - } - if e.SendDontHave != true { - t.Fatal("true SendDontHave should override false SendDontHave") - } - if e.Priority != 1 { - t.Fatal("priority should only be overridden if wants are of same type") - } - - msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) - e = msg.Wantlist()[0] - if e.Priority != 2 { - t.Fatal("priority should be overridden if wants are of same type") - } - - msg.AddEntry(b.Cid(), 3, pb.Message_Wantlist_Have, false) - e = msg.Wantlist()[0] - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("want-have should not override want-block") - } - if e.SendDontHave != true { - t.Fatal("false SendDontHave should not override true SendDontHave") - } - if e.Priority != 2 { - t.Fatal("priority should only be overridden if wants are of same type") - } - - msg.Cancel(b.Cid()) - e = msg.Wantlist()[0] - if !e.Cancel { - t.Fatal("cancel should override want") - } - - msg.AddEntry(b.Cid(), 10, pb.Message_Wantlist_Block, true) - if !e.Cancel { - t.Fatal("want should not override cancel") - } -} - -func TestEntrySize(t 
*testing.T) { - blockGenerator := blocksutil.NewBlockGenerator() - c := blockGenerator.Next().Cid() - e := Entry{ - Entry: wantlist.Entry{ - Cid: c, - Priority: 10, - WantType: pb.Message_Wantlist_Have, - }, - SendDontHave: true, - Cancel: false, - } - epb := e.ToPB() - if e.Size() != epb.Size() { - t.Fatal("entry size calculation incorrect", e.Size(), epb.Size()) - } -} diff --git a/message/pb/Makefile b/message/pb/Makefile deleted file mode 100644 index df34e54b..00000000 --- a/message/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/message/pb/cid.go b/message/pb/cid.go index 34862b3d..27bf1262 100644 --- a/message/pb/cid.go +++ b/message/pb/cid.go @@ -1,7 +1,7 @@ package bitswap_message_pb import ( - "github.com/ipfs/go-cid" + libipfs "github.com/ipfs/go-libipfs/bitswap/message/pb" ) // NOTE: Don't "embed" the cid, wrap it like we're doing here. Otherwise, gogo @@ -9,36 +9,5 @@ import ( // Cid is a custom type for CIDs in protobufs, that allows us to avoid // reallocating. -type Cid struct { - Cid cid.Cid -} - -func (c Cid) Marshal() ([]byte, error) { - return c.Cid.Bytes(), nil -} - -func (c *Cid) MarshalTo(data []byte) (int, error) { - // intentionally using KeyString here to avoid allocating. - return copy(data[:c.Size()], c.Cid.KeyString()), nil -} - -func (c *Cid) Unmarshal(data []byte) (err error) { - c.Cid, err = cid.Cast(data) - return err -} - -func (c *Cid) Size() int { - return len(c.Cid.KeyString()) -} - -func (c Cid) MarshalJSON() ([]byte, error) { - return c.Cid.MarshalJSON() -} - -func (c *Cid) UnmarshalJSON(data []byte) error { - return c.Cid.UnmarshalJSON(data) -} - -func (c Cid) Equal(other Cid) bool { - return c.Cid.Equals(c.Cid) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Cid instead +type Cid = libipfs.Cid diff --git a/message/pb/cid_test.go b/message/pb/cid_test.go deleted file mode 100644 index 3d4b87a7..00000000 --- a/message/pb/cid_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package bitswap_message_pb_test - -import ( - "bytes" - "testing" - - "github.com/ipfs/go-cid" - u "github.com/ipfs/go-ipfs-util" - - pb "github.com/ipfs/go-bitswap/message/pb" -) - -func TestCID(t *testing.T) { - var expected = [...]byte{ - 10, 34, 18, 32, 195, 171, - 143, 241, 55, 32, 232, 173, - 144, 71, 221, 57, 70, 107, - 60, 137, 116, 229, 146, 194, - 250, 56, 61, 74, 57, 96, - 113, 76, 174, 240, 196, 242, - } - - c := cid.NewCidV0(u.Hash([]byte("foobar"))) - msg := pb.Message_BlockPresence{Cid: pb.Cid{Cid: c}} - actual, err := msg.Marshal() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(actual, expected[:]) { - t.Fatal("failed to correctly encode custom CID type") - } -} diff --git a/message/pb/message.pb.go b/message/pb/message.pb.go index ef98a0a9..39379223 100644 --- a/message/pb/message.pb.go +++ b/message/pb/message.pb.go @@ -1,1569 +1,62 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: message.proto - package bitswap_message_pb import ( - fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + libipfs "github.com/ipfs/go-libipfs/bitswap/message/pb" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Message_BlockPresenceType int32 +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb. instead +type Message_BlockPresenceType = libipfs.Message_BlockPresenceType const ( - Message_Have Message_BlockPresenceType = 0 - Message_DontHave Message_BlockPresenceType = 1 + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Have instead + Message_Have Message_BlockPresenceType = libipfs.Message_Have + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_DontHave instead + Message_DontHave Message_BlockPresenceType = libipfs.Message_DontHave ) -var Message_BlockPresenceType_name = map[int32]string{ - 0: "Have", - 1: "DontHave", -} - -var Message_BlockPresenceType_value = map[string]int32{ - "Have": 0, - "DontHave": 1, -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_BlockPresenceType_name instead +var Message_BlockPresenceType_name = libipfs.Message_BlockPresenceType_name -func (x Message_BlockPresenceType) String() string { - return proto.EnumName(Message_BlockPresenceType_name, int32(x)) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_BlockPresenceType_value instead +var Message_BlockPresenceType_value = libipfs.Message_BlockPresenceType_value -func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} -} - -type Message_Wantlist_WantType int32 +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist_WantType instead +type Message_Wantlist_WantType = libipfs.Message_Wantlist_WantType const ( - Message_Wantlist_Block Message_Wantlist_WantType = 0 - Message_Wantlist_Have Message_Wantlist_WantType = 1 + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist_Block instead + Message_Wantlist_Block Message_Wantlist_WantType = libipfs.Message_Wantlist_Block + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist_Have instead + Message_Wantlist_Have Message_Wantlist_WantType = libipfs.Message_Wantlist_Have ) -var Message_Wantlist_WantType_name = map[int32]string{ - 0: "Block", - 1: "Have", -} - -var Message_Wantlist_WantType_value = map[string]int32{ - "Block": 0, - "Have": 1, -} - -func (x Message_Wantlist_WantType) String() string { - return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) -} - -func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} -} - -type Message struct { - Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` - Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` - BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" json:"blockPresences"` - PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func 
(*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} -func (m *Message) XXX_Size() int { - return m.Size() -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetWantlist() Message_Wantlist { - if m != nil { - return m.Wantlist - } - return Message_Wantlist{} -} - -func (m *Message) GetBlocks() [][]byte { - if m != nil { - return m.Blocks - } - return nil -} - -func (m *Message) GetPayload() []Message_Block { - if m != nil { - return m.Payload - } - return nil -} - -func (m *Message) GetBlockPresences() []Message_BlockPresence { - if m != nil { - return m.BlockPresences - } - return nil -} - -func (m *Message) GetPendingBytes() int32 { - if m != nil { - return m.PendingBytes - } - return 0 -} - -type Message_Wantlist struct { - Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` - Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` -} - -func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } -func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } -func (*Message_Wantlist) ProtoMessage() {} -func (*Message_Wantlist) Descriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} -} -func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message_Wantlist) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Wantlist.Merge(m, src) -} -func (m *Message_Wantlist) XXX_Size() int { - return m.Size() -} -func (m *Message_Wantlist) XXX_DiscardUnknown() { - xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) -} - -var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo - -func (m *Message_Wantlist) GetEntries() []Message_Wantlist_Entry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *Message_Wantlist) GetFull() bool { - if m != nil { - return m.Full - } - return false -} - -type Message_Wantlist_Entry struct { - Block Cid `protobuf:"bytes,1,opt,name=block,proto3,customtype=Cid" json:"block"` - Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` - Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` - WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` - SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` -} - -func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } -func (m *Message_Wantlist_Entry) String() string { return 
proto.CompactTextString(m) } -func (*Message_Wantlist_Entry) ProtoMessage() {} -func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} -} -func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) -} -func (m *Message_Wantlist_Entry) XXX_Size() int { - return m.Size() -} -func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo - -func (m *Message_Wantlist_Entry) GetPriority() int32 { - if m != nil { - return m.Priority - } - return 0 -} - -func (m *Message_Wantlist_Entry) GetCancel() bool { - if m != nil { - return m.Cancel - } - return false -} - -func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { - if m != nil { - return m.WantType - } - return Message_Wantlist_Block -} - -func (m *Message_Wantlist_Entry) GetSendDontHave() bool { - if m != nil { - return m.SendDontHave - } - return false -} - -type Message_Block struct { - Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *Message_Block) Reset() { *m = Message_Block{} } -func (m *Message_Block) String() string { return proto.CompactTextString(m) } -func (*Message_Block) ProtoMessage() {} -func (*Message_Block) Descriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} -} -func (m *Message_Block) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message_Block) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Block.Merge(m, src) -} -func (m *Message_Block) XXX_Size() int { - return m.Size() -} -func (m *Message_Block) XXX_DiscardUnknown() { - xxx_messageInfo_Message_Block.DiscardUnknown(m) -} - -var xxx_messageInfo_Message_Block proto.InternalMessageInfo - -func (m *Message_Block) GetPrefix() []byte { - if m != nil { - return m.Prefix - } - return nil -} - -func (m *Message_Block) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type Message_BlockPresence struct { - Cid Cid `protobuf:"bytes,1,opt,name=cid,proto3,customtype=Cid" json:"cid"` - Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` -} - -func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } -func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } -func (*Message_BlockPresence) ProtoMessage() {} -func (*Message_BlockPresence) Descriptor() ([]byte, []int) { - return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} -} -func (m *Message_BlockPresence) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_BlockPresence.Merge(m, src) -} -func (m *Message_BlockPresence) XXX_Size() int { - return m.Size() -} -func (m *Message_BlockPresence) XXX_DiscardUnknown() { - xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) -} - -var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo - -func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { - if m != nil { - return m.Type - } - return Message_Have -} - -func init() { - proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", Message_BlockPresenceType_name, Message_BlockPresenceType_value) - proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) - proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") - proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") - proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") - proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") - proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") -} - -func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } - -var fileDescriptor_33c57e4bae7b9afd = []byte{ - // 497 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, - 0x14, 0xc6, 0x33, 0x4d, 0xd2, 0xc6, 0xd3, 0xee, 0x52, 0xe7, 0x42, 0x42, 0xc0, 0x34, 0x5b, 0xbc, - 0x88, 0xca, 0x66, 0xa1, 0xfb, 0x04, 0x5b, 0xff, 0xa0, 0x82, 0x20, 0x83, 0xd0, 0xeb, 0xfc, 0x99, - 0xd6, 0xc1, 0x6c, 0x12, 0x33, 0x53, 0xd7, 0xbe, 0x85, 0x8f, 0xb5, 0x37, 0xc2, 0x5e, 0x8a, 0xca, - 0x22, 0xed, 0x8b, 0x48, 0x4e, 0xa6, 0x85, 0xba, 0xe2, 0xee, 0xdd, 0x9c, 0x33, 0xe7, 0xfb, 0x65, - 0xbe, 0xef, 0x10, 0x38, 0x38, 0xe7, 0x52, 0xc6, 0x0b, 0x1e, 0x55, 0x75, 0xa9, 0x4a, 0x4a, 0x13, - 0xa1, 0xe4, 0x45, 0x5c, 0x45, 0xbb, 0x76, 0xe2, 0x1d, 0x2f, 0x84, 0xfa, 0xb0, 0x4c, 0xa2, 0xb4, - 0x3c, 0x3f, 0x59, 0x94, 0x8b, 0xf2, 0x04, 0x47, 0x93, 0xe5, 0x1c, 0x2b, 0x2c, 0xf0, 0xd4, 0x22, - 0xc6, 0xbf, 0xba, 0xd0, 0x7b, 0xdb, 0xaa, 0xe9, 0x4b, 0x70, 0x2e, 0xe2, 0x42, 0xe5, 0x42, 0x2a, - 0x97, 0x04, 0x24, 0xec, 0x4f, 0x1e, 0x45, 0x37, 0xbf, 0x10, 0xe9, 0xf1, 0x68, 0xa6, 0x67, 0xa7, - 0xd6, 0xe5, 0xf5, 0xc8, 0x60, 0x3b, 0x2d, 0x7d, 0x00, 0xdd, 0x24, 0x2f, 0xd3, 0x8f, 0xd2, 0xed, - 0x04, 0x66, 0x38, 0x60, 0xba, 0xa2, 0x67, 0xd0, 0xab, 0xe2, 0x55, 0x5e, 0xc6, 0x99, 0x6b, 0x06, - 0x66, 0xd8, 0x9f, 0x1c, 0xfd, 0x0f, 0x3f, 0x6d, 0x44, 0x9a, 0xbd, 0xd5, 0xd1, 0x19, 0x1c, 0x22, - 0xec, 0x5d, 0xcd, 0x25, 0x2f, 0x52, 0x2e, 0x5d, 0x0b, 0x49, 0x8f, 0x6f, 0x25, 0x6d, 0x15, 0x9a, - 0xf8, 0x17, 0x86, 0x8e, 0x61, 0x50, 0xf1, 0x22, 0x13, 0xc5, 0x62, 0xba, 0x52, 0x5c, 0xba, 0x76, - 0x40, 0x42, 0x9b, 0xed, 0xf5, 0xbc, 0x9f, 0x1d, 0x70, 0xb6, 0xa6, 0xe9, 0x1b, 0xe8, 0xf1, 0x42, - 0xd5, 0x82, 0x4b, 0x97, 0xe0, 0x13, 0x9e, 0xdc, 0x25, 0xab, 0xe8, 0x45, 0xa1, 0xea, 0xd5, 0xd6, - 0x95, 0x06, 0x50, 0x0a, 0xd6, 0x7c, 0x99, 0xe7, 0x6e, 0x27, 0x20, 0xa1, 0xc3, 0xf0, 0xec, 
0x7d, - 0x23, 0x60, 0xe3, 0x30, 0x3d, 0x02, 0x1b, 0x1f, 0x8b, 0x3b, 0x19, 0x4c, 0xfb, 0x8d, 0xf6, 0xc7, - 0xf5, 0xc8, 0x7c, 0x26, 0x32, 0xd6, 0xde, 0x50, 0x0f, 0x9c, 0xaa, 0x16, 0x65, 0x2d, 0xd4, 0x0a, - 0x21, 0x36, 0xdb, 0xd5, 0xcd, 0x36, 0xd2, 0xb8, 0x48, 0x79, 0xee, 0x9a, 0x88, 0xd7, 0x15, 0x7d, - 0xdd, 0x6e, 0xfb, 0xfd, 0xaa, 0xe2, 0xae, 0x15, 0x90, 0xf0, 0x70, 0x72, 0x7c, 0x27, 0x07, 0x33, - 0x2d, 0x62, 0x3b, 0x79, 0x13, 0x9e, 0xe4, 0x45, 0xf6, 0xbc, 0x2c, 0xd4, 0xab, 0xf8, 0x33, 0xc7, - 0xf0, 0x1c, 0xb6, 0xd7, 0x1b, 0x8f, 0xda, 0xec, 0x70, 0xfe, 0x1e, 0xd8, 0xb8, 0x93, 0xa1, 0x41, - 0x1d, 0xb0, 0x9a, 0xeb, 0x21, 0xf1, 0x4e, 0x75, 0xb3, 0x79, 0x70, 0x55, 0xf3, 0xb9, 0xf8, 0xd2, - 0x1a, 0x66, 0xba, 0x6a, 0x52, 0xca, 0x62, 0x15, 0xa3, 0xc1, 0x01, 0xc3, 0xb3, 0xf7, 0x09, 0x0e, - 0xf6, 0xb6, 0x4b, 0x1f, 0x82, 0x99, 0x8a, 0xec, 0x5f, 0x51, 0x35, 0x7d, 0x7a, 0x06, 0x96, 0x6a, - 0x0c, 0x77, 0x6e, 0x37, 0xbc, 0xc7, 0x45, 0xc3, 0x28, 0x1d, 0x3f, 0x85, 0xfb, 0x37, 0xae, 0x76, - 0x36, 0x0c, 0x3a, 0x00, 0x67, 0xeb, 0x79, 0x48, 0xa6, 0xee, 0xe5, 0xda, 0x27, 0x57, 0x6b, 0x9f, - 0xfc, 0x5e, 0xfb, 0xe4, 0xeb, 0xc6, 0x37, 0xae, 0x36, 0xbe, 0xf1, 0x7d, 0xe3, 0x1b, 0x49, 0x17, - 0xff, 0xbf, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x8a, 0xaf, 0x83, 0xd3, 0x03, 0x00, - 0x00, -} - -func (m *Message) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PendingBytes != 0 { - i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) - i-- - dAtA[i] = 0x28 - } - if len(m.BlockPresences) > 0 { - for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Payload) > 0 { - for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Blocks) > 0 { - for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Blocks[iNdEx]) - copy(dAtA[i:], m.Blocks[iNdEx]) - i = encodeVarintMessage(dAtA, i, uint64(len(m.Blocks[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Wantlist.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message_Wantlist) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Full { - i-- - if m.Full { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Entries) > 0 { - for iNdEx := 
len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SendDontHave { - i-- - if m.SendDontHave { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.WantType != 0 { - i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) - i-- - dAtA[i] = 0x20 - } - if m.Cancel { - i-- - if m.Cancel { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Priority != 0 { - i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) - i-- - dAtA[i] = 0x10 - } - { - size := m.Block.Size() - i -= size - if _, err := m.Block.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMessage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Message_Block) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if len(m.Prefix) > 0 { - i -= len(m.Prefix) - copy(dAtA[i:], m.Prefix) - i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Type != 0 { - i = encodeVarintMessage(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } - { - size := m.Cid.Size() - i -= size - if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMessage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { - offset -= sovMessage(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Message) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Wantlist.Size() - n += 1 + l + sovMessage(uint64(l)) - if len(m.Blocks) > 0 { - for _, b := range m.Blocks { - l = len(b) - n += 1 + l + 
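The deleted marshalling code above writes each field back-to-front and length-prefixes it with a protobuf varint: encodeVarintMessage emits the bytes and the sovMessage helper it relies on predicts how many are needed. A standalone sketch of those two helpers, using only the standard library:

package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes the varint encoding of v occupies
// (7 payload bits per byte, at least one byte), mirroring sovMessage.
func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// putVarint appends the little-endian base-128 encoding of v, mirroring
// the loop inside encodeVarintMessage.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	for _, v := range []uint64{0, 127, 128, 300, 1 << 20} {
		enc := putVarint(nil, v)
		fmt.Printf("v=%-8d bytes=%x predicted=%d\n", v, enc, sov(v))
	}
}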
sovMessage(uint64(l)) - } - } - if len(m.Payload) > 0 { - for _, e := range m.Payload { - l = e.Size() - n += 1 + l + sovMessage(uint64(l)) - } - } - if len(m.BlockPresences) > 0 { - for _, e := range m.BlockPresences { - l = e.Size() - n += 1 + l + sovMessage(uint64(l)) - } - } - if m.PendingBytes != 0 { - n += 1 + sovMessage(uint64(m.PendingBytes)) - } - return n -} - -func (m *Message_Wantlist) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovMessage(uint64(l)) - } - } - if m.Full { - n += 2 - } - return n -} - -func (m *Message_Wantlist_Entry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Block.Size() - n += 1 + l + sovMessage(uint64(l)) - if m.Priority != 0 { - n += 1 + sovMessage(uint64(m.Priority)) - } - if m.Cancel { - n += 2 - } - if m.WantType != 0 { - n += 1 + sovMessage(uint64(m.WantType)) - } - if m.SendDontHave { - n += 2 - } - return n -} - -func (m *Message_Block) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Prefix) - if l > 0 { - n += 1 + l + sovMessage(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovMessage(uint64(l)) - } - return n -} - -func (m *Message_BlockPresence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Cid.Size() - n += 1 + l + sovMessage(uint64(l)) - if m.Type != 0 { - n += 1 + sovMessage(uint64(m.Type)) - } - return n -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist_WantType_name instead +var Message_Wantlist_WantType_name = libipfs.Message_Wantlist_WantType_name -func sovMessage(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMessage(x uint64) (n int) { - return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Message) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Wantlist", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blocks = append(m.Blocks, make([]byte, postIndex-iNdEx)) - copy(m.Blocks[len(m.Blocks)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = append(m.Payload, Message_Block{}) - if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) - if err := m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) - } - m.PendingBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PendingBytes |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMessage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist_WantType_value instead +var Message_Wantlist_WantType_value = libipfs.Message_Wantlist_WantType_value - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Wantlist: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Wantlist: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entries", 
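Each of the deleted Unmarshal loops above first reads a varint key and then splits it into a field number (upper bits) and a wire type (low three bits); pendingBytes, for instance, is field 5 with wire type 0. A small self-contained sketch of that step on a hand-built buffer:

package main

import "fmt"

// decodeVarint reads a little-endian base-128 varint and returns the value
// plus the number of bytes consumed.
func decodeVarint(b []byte) (uint64, int) {
	var v uint64
	for i, c := range b {
		v |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return v, i + 1
		}
	}
	return v, len(b)
}

func main() {
	// key = 5<<3 | 0 = 0x28 (field 5, varint wire type), then varint(1024).
	buf := []byte{0x28, 0x80, 0x08}

	key, n := decodeVarint(buf)
	fieldNum := key >> 3
	wireType := key & 0x7
	val, _ := decodeVarint(buf[n:])
	fmt.Printf("field=%d wiretype=%d value=%d\n", fieldNum, wireType, val) // field=5 wiretype=0 value=1024
}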
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entries = append(m.Entries, Message_Wantlist_Entry{}) - if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Full", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Full = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipMessage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message instead +type Message = libipfs.Message - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Entry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - m.Priority = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Priority |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cancel", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - m.Cancel = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) - } - m.WantType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SendDontHave = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipMessage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist instead +type Message_Wantlist = libipfs.Message_Wantlist - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message_Block) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Block: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) - if m.Prefix == nil { - m.Prefix = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Wantlist_Entry instead +type Message_Wantlist_Entry = libipfs.Message_Wantlist_Entry - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessage - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Message_BlockPresenceType(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMessage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_Block instead +type Message_Block = libipfs.Message_Block - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMessage(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, 
ErrIntOverflowMessage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMessage - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMessage - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.Message_BlockPresence instead +type Message_BlockPresence = libipfs.Message_BlockPresence var ( - ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.ErrInvalidLengthMessage instead + ErrInvalidLengthMessage = libipfs.ErrInvalidLengthMessage + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.ErrIntOverflowMessage instead + ErrIntOverflowMessage = libipfs.ErrIntOverflowMessage + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/message/pb.ErrUnexpectedEndOfGroupMessage instead + ErrUnexpectedEndOfGroupMessage = libipfs.ErrUnexpectedEndOfGroupMessage ) diff --git a/message/pb/message.proto b/message/pb/message.proto deleted file mode 100644 index e6c271cc..00000000 --- a/message/pb/message.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package bitswap.message.pb; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -message Message { - - message Wantlist { - enum WantType { - Block = 0; - Have = 1; - } - - message Entry { - bytes block = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) - int32 priority = 2; // the priority (normalized). default to 1 - bool cancel = 3; // whether this revokes an entry - WantType wantType = 4; // Note: defaults to enum 0, ie Block - bool sendDontHave = 5; // Note: defaults to false - } - - repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries - bool full = 2; // whether this is the full wantlist. 
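The removed message.proto is the bitswap wire schema: a wantlist (optionally marked full), raw blocks (bitswap 1.0.0), prefixed payload blocks (1.1.0+), block presences, and a pendingBytes hint. A short round-trip sketch using the generated pb package, which after this change resolves to aliases into go-libipfs, so either import path should behave the same; wantlist entries are omitted here to keep the sketch free of CID construction:

package main

import (
	"fmt"

	pb "github.com/ipfs/go-bitswap/message/pb"
)

func main() {
	msg := pb.Message{
		Wantlist:     pb.Message_Wantlist{Full: true},     // replaces the peer's view of our wants
		Blocks:       [][]byte{[]byte("raw block bytes")}, // 1.0.0-style block transfer
		PendingBytes: 4096,                                // bytes still queued for the peer
	}

	data, err := msg.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println("encoded", len(data), "bytes; Size() reported", msg.Size())

	var back pb.Message
	if err := back.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println("full wantlist:", back.Wantlist.Full, "pendingBytes:", back.PendingBytes)
}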
default to false - } - - message Block { - bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) - bytes data = 2; - } - - enum BlockPresenceType { - Have = 0; - DontHave = 1; - } - message BlockPresence { - bytes cid = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; - BlockPresenceType type = 2; - } - - Wantlist wantlist = 1 [(gogoproto.nullable) = false]; - repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 - repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 - repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; - int32 pendingBytes = 5; -} diff --git a/metrics/metrics.go b/metrics/metrics.go index b7192372..0b8897b3 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -3,44 +3,46 @@ package metrics import ( "context" + libipfs "github.com/ipfs/go-libipfs/bitswap/metrics" "github.com/ipfs/go-metrics-interface" ) -var ( - // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size - metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} - - timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} -) - +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.DupHist instead func DupHist(ctx context.Context) metrics.Histogram { - return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) + return libipfs.DupHist(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.AllHist instead func AllHist(ctx context.Context) metrics.Histogram { - return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) + return libipfs.AllHist(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.SentHist instead func SentHist(ctx context.Context) metrics.Histogram { - return metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) + return libipfs.SentHist(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.SendTimeHist instead func SendTimeHist(ctx context.Context) metrics.Histogram { - return metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) + return libipfs.SendTimeHist(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.PendingEngineGauge instead func PendingEngineGauge(ctx context.Context) metrics.Gauge { - return metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() + return libipfs.PendingEngineGauge(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.ActiveEngineGauge instead func ActiveEngineGauge(ctx context.Context) metrics.Gauge { - return metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() + return libipfs.ActiveEngineGauge(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.PendingBlocksGauge instead func PendingBlocksGauge(ctx context.Context) metrics.Gauge { - return metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + return libipfs.PendingBlocksGauge(ctx) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/metrics.ActiveBlocksGauge instead func ActiveBlocksGauge(ctx context.Context) metrics.Gauge { - return metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + return libipfs.ActiveBlocksGauge(ctx) } diff 
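metrics.go becomes a set of thin forwarding wrappers; the histogram buckets previously defined here (1<<6 through 1<<22 bytes, plus the 1<<18+15 bucket for old file chunks) now live in go-libipfs. Calling through the deprecated names still works. A sketch, assuming the default no-op backend from go-metrics-interface; the block size below is an arbitrary placeholder:

package main

import (
	"context"

	bsmetrics "github.com/ipfs/go-bitswap/metrics"
)

func main() {
	ctx := context.Background()

	// Both histograms are now created by go-libipfs under the same names.
	all := bsmetrics.AllHist(ctx)
	dup := bsmetrics.DupHist(ctx)

	blockSize := 256 * 1024 // placeholder: size in bytes of a received block
	all.Observe(float64(blockSize))
	dup.Observe(float64(blockSize)) // only if the block was a duplicate
}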
--git a/network/connecteventmanager.go b/network/connecteventmanager.go index 88337fce..e4ba3931 100644 --- a/network/connecteventmanager.go +++ b/network/connecteventmanager.go @@ -1,218 +1,8 @@ package network import ( - "sync" - - "github.com/libp2p/go-libp2p/core/peer" -) - -type ConnectionListener interface { - PeerConnected(peer.ID) - PeerDisconnected(peer.ID) -} - -type state byte - -const ( - stateDisconnected = iota - stateResponsive - stateUnresponsive + libipfs "github.com/ipfs/go-libipfs/bitswap/network" ) -type connectEventManager struct { - connListeners []ConnectionListener - lk sync.RWMutex - cond sync.Cond - peers map[peer.ID]*peerState - - changeQueue []peer.ID - stop bool - done chan struct{} -} - -type peerState struct { - newState, curState state - pending bool -} - -func newConnectEventManager(connListeners ...ConnectionListener) *connectEventManager { - evtManager := &connectEventManager{ - connListeners: connListeners, - peers: make(map[peer.ID]*peerState), - done: make(chan struct{}), - } - evtManager.cond = sync.Cond{L: &evtManager.lk} - return evtManager -} - -func (c *connectEventManager) Start() { - go c.worker() -} - -func (c *connectEventManager) Stop() { - c.lk.Lock() - c.stop = true - c.lk.Unlock() - c.cond.Broadcast() - - <-c.done -} - -func (c *connectEventManager) getState(p peer.ID) state { - if state, ok := c.peers[p]; ok { - return state.newState - } else { - return stateDisconnected - } -} - -func (c *connectEventManager) setState(p peer.ID, newState state) { - state, ok := c.peers[p] - if !ok { - state = new(peerState) - c.peers[p] = state - } - state.newState = newState - if !state.pending && state.newState != state.curState { - state.pending = true - c.changeQueue = append(c.changeQueue, p) - c.cond.Broadcast() - } -} - -// Waits for a change to be enqueued, or for the event manager to be stopped. Returns false if the -// connect event manager has been stopped. -func (c *connectEventManager) waitChange() bool { - for !c.stop && len(c.changeQueue) == 0 { - c.cond.Wait() - } - return !c.stop -} - -func (c *connectEventManager) worker() { - c.lk.Lock() - defer c.lk.Unlock() - defer close(c.done) - - for c.waitChange() { - pid := c.changeQueue[0] - c.changeQueue[0] = peer.ID("") // free the peer ID (slicing won't do that) - c.changeQueue = c.changeQueue[1:] - - state, ok := c.peers[pid] - // If we've disconnected and forgotten, continue. - if !ok { - // This shouldn't be possible because _this_ thread is responsible for - // removing peers from this map, and we shouldn't get duplicate entries in - // the change queue. - log.Error("a change was enqueued for a peer we're not tracking") - continue - } - - // Record the fact that this "state" is no longer in the queue. - state.pending = false - - // Then, if there's nothing to do, continue. - if state.curState == state.newState { - continue - } - - // Or record the state update, then apply it. - oldState := state.curState - state.curState = state.newState - - switch state.newState { - case stateDisconnected: - delete(c.peers, pid) - fallthrough - case stateUnresponsive: - // Only trigger a disconnect event if the peer was responsive. - // We could be transitioning from unresponsive to disconnected. 
- if oldState == stateResponsive { - c.lk.Unlock() - for _, v := range c.connListeners { - v.PeerDisconnected(pid) - } - c.lk.Lock() - } - case stateResponsive: - c.lk.Unlock() - for _, v := range c.connListeners { - v.PeerConnected(pid) - } - c.lk.Lock() - } - } -} - -// Called whenever we receive a new connection. May be called many times. -func (c *connectEventManager) Connected(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - // !responsive -> responsive - - if c.getState(p) == stateResponsive { - return - } - c.setState(p, stateResponsive) -} - -// Called when we drop the final connection to a peer. -func (c *connectEventManager) Disconnected(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - // !disconnected -> disconnected - - if c.getState(p) == stateDisconnected { - return - } - - c.setState(p, stateDisconnected) -} - -// Called whenever a peer is unresponsive. -func (c *connectEventManager) MarkUnresponsive(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - // responsive -> unresponsive - - if c.getState(p) != stateResponsive { - return - } - - c.setState(p, stateUnresponsive) -} - -// Called whenever we receive a message from a peer. -// -// - When we're connected to the peer, this will mark the peer as responsive (from unresponsive). -// - When not connected, we ignore this call. Unfortunately, a peer may disconnect before we process -// -// the "on message" event, so we can't treat this as evidence of a connection. -func (c *connectEventManager) OnMessage(p peer.ID) { - c.lk.RLock() - unresponsive := c.getState(p) == stateUnresponsive - c.lk.RUnlock() - - // Only continue if both connected, and unresponsive. - if !unresponsive { - return - } - - // unresponsive -> responsive - - // We need to make a modification so now take a write lock - c.lk.Lock() - defer c.lk.Unlock() - - // Note: state may have changed in the time between when read lock - // was released and write lock taken, so check again - if c.getState(p) != stateUnresponsive { - return - } - - c.setState(p, stateResponsive) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.ConnectionListener instead +type ConnectionListener = libipfs.ConnectionListener diff --git a/network/connecteventmanager_test.go b/network/connecteventmanager_test.go deleted file mode 100644 index 6696c028..00000000 --- a/network/connecteventmanager_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package network - -import ( - "sync" - "testing" - "time" - - "github.com/ipfs/go-bitswap/internal/testutil" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" -) - -type mockConnEvent struct { - connected bool - peer peer.ID -} - -type mockConnListener struct { - sync.Mutex - events []mockConnEvent -} - -func newMockConnListener() *mockConnListener { - return new(mockConnListener) -} - -func (cl *mockConnListener) PeerConnected(p peer.ID) { - cl.Lock() - defer cl.Unlock() - cl.events = append(cl.events, mockConnEvent{connected: true, peer: p}) -} - -func (cl *mockConnListener) PeerDisconnected(p peer.ID) { - cl.Lock() - defer cl.Unlock() - cl.events = append(cl.events, mockConnEvent{connected: false, peer: p}) -} - -func wait(t *testing.T, c *connectEventManager) { - require.Eventually(t, func() bool { - c.lk.RLock() - defer c.lk.RUnlock() - return len(c.changeQueue) == 0 - }, time.Second, time.Millisecond, "connection event manager never processed events") -} - -func TestConnectEventManagerConnectDisconnect(t *testing.T) { - connListener := newMockConnListener() - peers := testutil.GeneratePeers(2) - cem 
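The deleted connectEventManager collapses raw libp2p connect/disconnect notifications and responsiveness tracking into at most one PeerConnected/PeerDisconnected callback per real state change; only the ConnectionListener alias remains in this package. A minimal listener sketch, with a placeholder peer ID literal:

package main

import (
	"fmt"

	bsnet "github.com/ipfs/go-bitswap/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

// logListener just logs the transitions the event manager reports.
type logListener struct{}

func (logListener) PeerConnected(p peer.ID)    { fmt.Println("peer responsive:", p) }
func (logListener) PeerDisconnected(p peer.ID) { fmt.Println("peer gone or unresponsive:", p) }

// Compile-time check: the deprecated alias still names the same interface.
var _ bsnet.ConnectionListener = logListener{}

func main() {
	var l logListener
	// In real use the network layer invokes these as peers become responsive
	// or go away; calling them directly just demonstrates the interface shape.
	l.PeerConnected("QmPlaceholderPeer")
	l.PeerDisconnected("QmPlaceholderPeer")
}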
:= newConnectEventManager(connListener) - cem.Start() - t.Cleanup(cem.Stop) - - var expectedEvents []mockConnEvent - - // Connect A twice, should only see one event - cem.Connected(peers[0]) - cem.Connected(peers[0]) - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: peers[0], - connected: true, - }) - - // Flush the event queue. - wait(t, cem) - require.Equal(t, expectedEvents, connListener.events) - - // Block up the event loop. - connListener.Lock() - cem.Connected(peers[1]) - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: peers[1], - connected: true, - }) - - // We don't expect this to show up. - cem.Disconnected(peers[0]) - cem.Connected(peers[0]) - - connListener.Unlock() - - wait(t, cem) - require.Equal(t, expectedEvents, connListener.events) -} - -func TestConnectEventManagerMarkUnresponsive(t *testing.T) { - connListener := newMockConnListener() - p := testutil.GeneratePeers(1)[0] - cem := newConnectEventManager(connListener) - cem.Start() - t.Cleanup(cem.Stop) - - var expectedEvents []mockConnEvent - - // Don't mark as connected when we receive a message (could have been delayed). - cem.OnMessage(p) - wait(t, cem) - require.Equal(t, expectedEvents, connListener.events) - - // Handle connected event. - cem.Connected(p) - wait(t, cem) - - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: p, - connected: true, - }) - require.Equal(t, expectedEvents, connListener.events) - - // Becomes unresponsive. - cem.MarkUnresponsive(p) - wait(t, cem) - - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: p, - connected: false, - }) - require.Equal(t, expectedEvents, connListener.events) - - // We have a new connection, mark them responsive. - cem.Connected(p) - wait(t, cem) - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: p, - connected: true, - }) - require.Equal(t, expectedEvents, connListener.events) - - // No duplicate event. - cem.OnMessage(p) - wait(t, cem) - require.Equal(t, expectedEvents, connListener.events) -} - -func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t *testing.T) { - connListener := newMockConnListener() - p := testutil.GeneratePeers(1)[0] - cem := newConnectEventManager(connListener) - cem.Start() - t.Cleanup(cem.Stop) - - var expectedEvents []mockConnEvent - - // Handle connected event. - cem.Connected(p) - wait(t, cem) - - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: p, - connected: true, - }) - require.Equal(t, expectedEvents, connListener.events) - - // Becomes unresponsive. 
- cem.MarkUnresponsive(p) - wait(t, cem) - - expectedEvents = append(expectedEvents, mockConnEvent{ - peer: p, - connected: false, - }) - require.Equal(t, expectedEvents, connListener.events) - - cem.Disconnected(p) - wait(t, cem) - require.Empty(t, cem.peers) // all disconnected - require.Equal(t, expectedEvents, connListener.events) -} diff --git a/network/interface.go b/network/interface.go index c58c3169..f40216de 100644 --- a/network/interface.go +++ b/network/interface.go @@ -1,111 +1,51 @@ package network import ( - "context" - "time" - - bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/network/internal" - - cid "github.com/ipfs/go-cid" - - "github.com/libp2p/go-libp2p/core/connmgr" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" + libipfs "github.com/ipfs/go-libipfs/bitswap/network" ) var ( // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol - ProtocolBitswapNoVers = internal.ProtocolBitswapNoVers + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.BitSwapNetwork instead + ProtocolBitswapNoVers = libipfs.ProtocolBitswapNoVers // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol - ProtocolBitswapOneZero = internal.ProtocolBitswapOneZero + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.ProtocolBitswapOneZero instead + ProtocolBitswapOneZero = libipfs.ProtocolBitswapOneZero // ProtocolBitswapOneOne is the the prefix for version 1.1.0 - ProtocolBitswapOneOne = internal.ProtocolBitswapOneOne + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.ProtocolBitswapOneOne instead + ProtocolBitswapOneOne = libipfs.ProtocolBitswapOneOne // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 - ProtocolBitswap = internal.ProtocolBitswap + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.ProtocolBitswap instead + ProtocolBitswap = libipfs.ProtocolBitswap ) // BitSwapNetwork provides network connectivity for BitSwap sessions. -type BitSwapNetwork interface { - Self() peer.ID - - // SendMessage sends a BitSwap message to a peer. - SendMessage( - context.Context, - peer.ID, - bsmsg.BitSwapMessage) error - - // Start registers the Reciver and starts handling new messages, connectivity events, etc. - Start(...Receiver) - // Stop stops the network service. - Stop() - - ConnectTo(context.Context, peer.ID) error - DisconnectFrom(context.Context, peer.ID) error - - NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error) - - ConnectionManager() connmgr.ConnManager - - Stats() Stats - - Routing - - Pinger -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.BitSwapNetwork instead +type BitSwapNetwork = libipfs.BitSwapNetwork // MessageSender is an interface for sending a series of messages over the bitswap // network -type MessageSender interface { - SendMsg(context.Context, bsmsg.BitSwapMessage) error - Close() error - Reset() error - // Indicates whether the remote peer supports HAVE / DONT_HAVE messages - SupportsHave() bool -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.MessageSender instead +type MessageSender = libipfs.MessageSender -type MessageSenderOpts struct { - MaxRetries int - SendTimeout time.Duration - SendErrorBackoff time.Duration -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.MessageSenderOpts instead +type MessageSenderOpts = libipfs.MessageSenderOpts // Receiver is an interface that can receive messages from the BitSwapNetwork. 
-type Receiver interface { - ReceiveMessage( - ctx context.Context, - sender peer.ID, - incoming bsmsg.BitSwapMessage) - - ReceiveError(error) - - // Connected/Disconnected warns bitswap about peer connections. - PeerConnected(peer.ID) - PeerDisconnected(peer.ID) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.Receiver instead +type Receiver = libipfs.Receiver // Routing is an interface to providing and finding providers on a bitswap // network. -type Routing interface { - // FindProvidersAsync returns a channel of providers for the given key. - FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID - - // Provide provides the key to the network. - Provide(context.Context, cid.Cid) error -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.Routing instead +type Routing = libipfs.Routing // Pinger is an interface to ping a peer and get the average latency of all pings -type Pinger interface { - // Ping a peer - Ping(context.Context, peer.ID) ping.Result - // Get the average latency of all pings - Latency(peer.ID) time.Duration -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.Pinger instead +type Pinger = libipfs.Pinger // Stats is a container for statistics about the bitswap network // the numbers inside are specific to bitswap, and not any other protocols // using the same underlying network. -type Stats struct { - MessagesSent uint64 - MessagesRecvd uint64 -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.Stats instead +type Stats = libipfs.Stats diff --git a/network/internal/default.go b/network/internal/default.go deleted file mode 100644 index 13f4936a..00000000 --- a/network/internal/default.go +++ /dev/null @@ -1,23 +0,0 @@ -package internal - -import ( - "github.com/libp2p/go-libp2p/core/protocol" -) - -var ( - // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol - ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" - // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol - ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" - // ProtocolBitswapOneOne is the the prefix for version 1.1.0 - ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" - // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 - ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" -) - -var DefaultProtocols = []protocol.ID{ - ProtocolBitswap, - ProtocolBitswapOneOne, - ProtocolBitswapOneZero, - ProtocolBitswapNoVers, -} diff --git a/network/ipfs_impl.go b/network/ipfs_impl.go index 392a00ed..bac6e75b 100644 --- a/network/ipfs_impl.go +++ b/network/ipfs_impl.go @@ -1,472 +1,13 @@ package network import ( - "context" - "errors" - "fmt" - "io" - "sync/atomic" - "time" - - bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/network/internal" - - cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p/core/connmgr" + libipfs "github.com/ipfs/go-libipfs/bitswap/network" "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" - msgio "github.com/libp2p/go-msgio" - ma "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-multistream" ) -var log = logging.Logger("bitswap_network") - -var connectTimeout = time.Second * 5 - -var maxSendTimeout = 2 * 
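interface.go now only re-exports these names; the protocol IDs themselves ("/ipfs/bitswap" through "/ipfs/bitswap/1.2.0") and the interface and option types are defined in go-libipfs. A quick sketch showing that code using the old identifiers still compiles:

package main

import (
	"fmt"
	"time"

	bsnet "github.com/ipfs/go-bitswap/network"
)

func main() {
	// Still the familiar protocol IDs, now sourced from go-libipfs.
	fmt.Println(bsnet.ProtocolBitswapNoVers, bsnet.ProtocolBitswapOneZero,
		bsnet.ProtocolBitswapOneOne, bsnet.ProtocolBitswap)

	// MessageSenderOpts is an alias too; zero-valued fields fall back to the
	// implementation defaults (3 retries, the max send timeout, 100ms backoff).
	opts := bsnet.MessageSenderOpts{SendTimeout: 10 * time.Second}
	fmt.Printf("%+v\n", opts)
}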
time.Minute -var minSendTimeout = 10 * time.Second -var sendLatency = 2 * time.Second -var minSendRate = (100 * 1000) / 8 // 100kbit/s - // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.NewFromIpfsHost instead func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { - s := processSettings(opts...) - - bitswapNetwork := impl{ - host: host, - routing: r, - - protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, - protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, - protocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne, - protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, - - supportedProtocols: s.SupportedProtocols, - } - - return &bitswapNetwork -} - -func processSettings(opts ...NetOpt) Settings { - s := Settings{SupportedProtocols: append([]protocol.ID(nil), internal.DefaultProtocols...)} - for _, opt := range opts { - opt(&s) - } - for i, proto := range s.SupportedProtocols { - s.SupportedProtocols[i] = s.ProtocolPrefix + proto - } - return s -} - -// impl transforms the ipfs network interface, which sends and receives -// NetMessage objects, into the bitswap network interface. -type impl struct { - // NOTE: Stats must be at the top of the heap allocation to ensure 64bit - // alignment. - stats Stats - - host host.Host - routing routing.ContentRouting - connectEvtMgr *connectEventManager - - protocolBitswapNoVers protocol.ID - protocolBitswapOneZero protocol.ID - protocolBitswapOneOne protocol.ID - protocolBitswap protocol.ID - - supportedProtocols []protocol.ID - - // inbound messages from the network are forwarded to the receiver - receivers []Receiver -} - -type streamMessageSender struct { - to peer.ID - stream network.Stream - connected bool - bsnet *impl - opts *MessageSenderOpts -} - -// Open a stream to the remote peer -func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { - if s.connected { - return s.stream, nil - } - - tctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) - defer cancel() - - if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { - return nil, err - } - - stream, err := s.bsnet.newStreamToPeer(tctx, s.to) - if err != nil { - return nil, err - } - - s.stream = stream - s.connected = true - return s.stream, nil -} - -// Reset the stream -func (s *streamMessageSender) Reset() error { - if s.stream != nil { - err := s.stream.Reset() - s.connected = false - return err - } - return nil -} - -// Close the stream -func (s *streamMessageSender) Close() error { - return s.stream.Close() -} - -// Indicates whether the peer supports HAVE / DONT_HAVE messages -func (s *streamMessageSender) SupportsHave() bool { - return s.bsnet.SupportsHave(s.stream.Protocol()) -} - -// Send a message to the peer, attempting multiple times -func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { - return s.multiAttempt(ctx, func() error { - return s.send(ctx, msg) - }) -} - -// Perform a function with multiple attempts, and a timeout -func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { - // Try to call the function repeatedly - var err error - for i := 0; i < s.opts.MaxRetries; i++ { - if err = fn(); err == nil { - // Attempt was successful - return nil - } - - // Attempt failed - - // If the sender has been closed or the context cancelled, just bail out - select { - case <-ctx.Done(): - return ctx.Err() - 
default: - } - - // Protocol is not supported, so no need to try multiple times - if errors.Is(err, multistream.ErrNotSupported) { - s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) - return err - } - - // Failed to send so reset stream and try again - _ = s.Reset() - - // Failed too many times so mark the peer as unresponsive and return an error - if i == s.opts.MaxRetries-1 { - s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(s.opts.SendErrorBackoff): - // wait a short time in case disconnect notifications are still propagating - log.Infof("send message to %s failed but context was not Done: %s", s.to, err) - } - } - return err -} - -// Send a message to the peer -func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { - start := time.Now() - stream, err := s.Connect(ctx) - if err != nil { - log.Infof("failed to open stream to %s: %s", s.to, err) - return err - } - - // The send timeout includes the time required to connect - // (although usually we will already have connected - we only need to - // connect after a failed attempt to send) - timeout := s.opts.SendTimeout - time.Since(start) - if err = s.bsnet.msgToStream(ctx, stream, msg, timeout); err != nil { - log.Infof("failed to send message to %s: %s", s.to, err) - return err - } - - return nil -} - -func (bsnet *impl) Self() peer.ID { - return bsnet.host.ID() -} - -func (bsnet *impl) Ping(ctx context.Context, p peer.ID) ping.Result { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - res := <-ping.Ping(ctx, bsnet.host, p) - return res -} - -func (bsnet *impl) Latency(p peer.ID) time.Duration { - return bsnet.host.Peerstore().LatencyEWMA(p) -} - -// Indicates whether the given protocol supports HAVE / DONT_HAVE messages -func (bsnet *impl) SupportsHave(proto protocol.ID) bool { - switch proto { - case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: - return false - } - return true -} - -func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage, timeout time.Duration) error { - deadline := time.Now().Add(timeout) - if dl, ok := ctx.Deadline(); ok && dl.Before(deadline) { - deadline = dl - } - - if err := s.SetWriteDeadline(deadline); err != nil { - log.Warnf("error setting deadline: %s", err) - } - - // Older Bitswap versions use a slightly different wire format so we need - // to convert the message to the appropriate format depending on the remote - // peer's Bitswap version. 
- switch s.Protocol() { - case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap: - if err := msg.ToNetV1(s); err != nil { - log.Debugf("error: %s", err) - return err - } - case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: - if err := msg.ToNetV0(s); err != nil { - log.Debugf("error: %s", err) - return err - } - default: - return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) - } - - atomic.AddUint64(&bsnet.stats.MessagesSent, 1) - - if err := s.SetWriteDeadline(time.Time{}); err != nil { - log.Warnf("error resetting deadline: %s", err) - } - return nil -} - -func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) { - opts = setDefaultOpts(opts) - - sender := &streamMessageSender{ - to: p, - bsnet: bsnet, - opts: opts, - } - - err := sender.multiAttempt(ctx, func() error { - _, err := sender.Connect(ctx) - return err - }) - - if err != nil { - return nil, err - } - - return sender, nil -} - -func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts { - copy := *opts - if opts.MaxRetries == 0 { - copy.MaxRetries = 3 - } - if opts.SendTimeout == 0 { - copy.SendTimeout = maxSendTimeout - } - if opts.SendErrorBackoff == 0 { - copy.SendErrorBackoff = 100 * time.Millisecond - } - return &copy -} - -func sendTimeout(size int) time.Duration { - timeout := sendLatency - timeout += time.Duration((uint64(time.Second) * uint64(size)) / uint64(minSendRate)) - if timeout > maxSendTimeout { - timeout = maxSendTimeout - } else if timeout < minSendTimeout { - timeout = minSendTimeout - } - return timeout -} - -func (bsnet *impl) SendMessage( - ctx context.Context, - p peer.ID, - outgoing bsmsg.BitSwapMessage) error { - - tctx, cancel := context.WithTimeout(ctx, connectTimeout) - defer cancel() - - s, err := bsnet.newStreamToPeer(tctx, p) - if err != nil { - return err - } - - timeout := sendTimeout(outgoing.Size()) - if err = bsnet.msgToStream(ctx, s, outgoing, timeout); err != nil { - _ = s.Reset() - return err - } - - return s.Close() -} - -func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { - return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) -} - -func (bsnet *impl) Start(r ...Receiver) { - bsnet.receivers = r - { - connectionListeners := make([]ConnectionListener, len(r)) - for i, v := range r { - connectionListeners[i] = v - } - bsnet.connectEvtMgr = newConnectEventManager(connectionListeners...) - } - for _, proto := range bsnet.supportedProtocols { - bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) - } - bsnet.host.Network().Notify((*netNotifiee)(bsnet)) - bsnet.connectEvtMgr.Start() - -} - -func (bsnet *impl) Stop() { - bsnet.connectEvtMgr.Stop() - bsnet.host.Network().StopNotify((*netNotifiee)(bsnet)) -} - -func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { - return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) -} - -func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { - panic("Not implemented: DisconnectFrom() is only used by tests") -} - -// FindProvidersAsync returns a channel of providers for the given key.
-func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - out := make(chan peer.ID, max) - go func() { - defer close(out) - providers := bsnet.routing.FindProvidersAsync(ctx, k, max) - for info := range providers { - if info.ID == bsnet.host.ID() { - continue // ignore self as provider - } - bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) - select { - case <-ctx.Done(): - return - case out <- info.ID: - } - } - }() - return out -} - -// Provide provides the key to the network -func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { - return bsnet.routing.Provide(ctx, k, true) -} - -// handleNewStream receives a new stream from the network. -func (bsnet *impl) handleNewStream(s network.Stream) { - defer s.Close() - - if len(bsnet.receivers) == 0 { - _ = s.Reset() - return - } - - reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) - for { - received, err := bsmsg.FromMsgReader(reader) - if err != nil { - if err != io.EOF { - _ = s.Reset() - for _, v := range bsnet.receivers { - v.ReceiveError(err) - } - log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) - } - return - } - - p := s.Conn().RemotePeer() - ctx := context.Background() - log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) - bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) - atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) - for _, v := range bsnet.receivers { - v.ReceiveMessage(ctx, p, received) - } - } -} - -func (bsnet *impl) ConnectionManager() connmgr.ConnManager { - return bsnet.host.ConnManager() -} - -func (bsnet *impl) Stats() Stats { - return Stats{ - MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), - MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), - } -} - -type netNotifiee impl - -func (nn *netNotifiee) impl() *impl { - return (*impl)(nn) -} - -func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { - // ignore transient connections - if v.Stat().Transient { - return - } - - nn.impl().connectEvtMgr.Connected(v.RemotePeer()) -} -func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { - // Only record a "disconnect" when we actually disconnect. - if n.Connectedness(v.RemotePeer()) == network.Connected { - return - } - - nn.impl().connectEvtMgr.Disconnected(v.RemotePeer()) + return libipfs.NewFromIpfsHost(host, r, opts...) 
} -func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} -func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} -func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} -func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} diff --git a/network/ipfs_impl_test.go b/network/ipfs_impl_test.go deleted file mode 100644 index 61f501a5..00000000 --- a/network/ipfs_impl_test.go +++ /dev/null @@ -1,670 +0,0 @@ -package network_test - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bsnet "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/network/internal" - tn "github.com/ipfs/go-bitswap/testnet" - ds "github.com/ipfs/go-datastore" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - mockrouting "github.com/ipfs/go-ipfs-routing/mock" - "github.com/multiformats/go-multistream" - - tnet "github.com/libp2p/go-libp2p-testing/net" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" -) - -// Receiver is an interface for receiving messages from the GraphSyncNetwork. -type receiver struct { - peers map[peer.ID]struct{} - messageReceived chan struct{} - connectionEvent chan bool - lastMessage bsmsg.BitSwapMessage - lastSender peer.ID - listener network.Notifiee -} - -func newReceiver() *receiver { - return &receiver{ - peers: make(map[peer.ID]struct{}), - messageReceived: make(chan struct{}), - // Avoid blocking. 100 is good enough for tests. - connectionEvent: make(chan bool, 100), - } -} - -func (r *receiver) ReceiveMessage( - ctx context.Context, - sender peer.ID, - incoming bsmsg.BitSwapMessage) { - r.lastSender = sender - r.lastMessage = incoming - select { - case <-ctx.Done(): - case r.messageReceived <- struct{}{}: - } -} - -func (r *receiver) ReceiveError(err error) { -} - -func (r *receiver) PeerConnected(p peer.ID) { - r.peers[p] = struct{}{} - r.connectionEvent <- true -} - -func (r *receiver) PeerDisconnected(p peer.ID) { - delete(r.peers, p) - r.connectionEvent <- false -} - -var errMockNetErr = fmt.Errorf("network err") - -type ErrStream struct { - network.Stream - lk sync.Mutex - err error - timingOut bool - closed bool -} - -type ErrHost struct { - host.Host - lk sync.Mutex - err error - timingOut bool - streams []*ErrStream -} - -func (es *ErrStream) Write(b []byte) (int, error) { - es.lk.Lock() - defer es.lk.Unlock() - - if es.err != nil { - return 0, es.err - } - if es.timingOut { - return 0, context.DeadlineExceeded - } - return es.Stream.Write(b) -} - -func (es *ErrStream) Close() error { - es.lk.Lock() - es.closed = true - es.lk.Unlock() - - return es.Stream.Close() -} - -func (eh *ErrHost) Connect(ctx context.Context, pi peer.AddrInfo) error { - eh.lk.Lock() - defer eh.lk.Unlock() - - if eh.err != nil { - return eh.err - } - if eh.timingOut { - return context.DeadlineExceeded - } - return eh.Host.Connect(ctx, pi) -} - -func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) { - eh.lk.Lock() - defer eh.lk.Unlock() - - if eh.err != nil { - return nil, errMockNetErr - } - if eh.timingOut { - return nil, context.DeadlineExceeded - } - stream, err := eh.Host.NewStream(ctx, p, pids...) 
- estrm := &ErrStream{Stream: stream, err: eh.err, timingOut: eh.timingOut} - - eh.streams = append(eh.streams, estrm) - return estrm, err -} - -func (eh *ErrHost) setError(err error) { - eh.lk.Lock() - defer eh.lk.Unlock() - - eh.err = err - for _, s := range eh.streams { - s.lk.Lock() - s.err = err - s.lk.Unlock() - } -} - -func (eh *ErrHost) setTimeoutState(timingOut bool) { - eh.lk.Lock() - defer eh.lk.Unlock() - - eh.timingOut = timingOut - for _, s := range eh.streams { - s.lk.Lock() - s.timingOut = timingOut - s.lk.Unlock() - } -} - -func TestMessageSendAndReceive(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - mn := mocknet.New() - defer mn.Close() - mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) - if err != nil { - t.Fatal("Unable to setup network") - } - p1 := tnet.RandIdentityOrFatal(t) - p2 := tnet.RandIdentityOrFatal(t) - - bsnet1 := streamNet.Adapter(p1) - bsnet2 := streamNet.Adapter(p2) - r1 := newReceiver() - r2 := newReceiver() - bsnet1.Start(r1) - t.Cleanup(bsnet1.Stop) - bsnet2.Start(r2) - t.Cleanup(bsnet2.Stop) - - err = mn.LinkAll() - if err != nil { - t.Fatal(err) - } - err = bsnet1.ConnectTo(ctx, p2.ID()) - if err != nil { - t.Fatal(err) - } - select { - case <-ctx.Done(): - t.Fatal("did not connect peer") - case <-r1.connectionEvent: - } - err = bsnet2.ConnectTo(ctx, p1.ID()) - if err != nil { - t.Fatal(err) - } - select { - case <-ctx.Done(): - t.Fatal("did not connect peer") - case <-r2.connectionEvent: - } - if _, ok := r1.peers[p2.ID()]; !ok { - t.Fatal("did to connect to correct peer") - } - if _, ok := r2.peers[p1.ID()]; !ok { - t.Fatal("did to connect to correct peer") - } - blockGenerator := blocksutil.NewBlockGenerator() - block1 := blockGenerator.Next() - block2 := blockGenerator.Next() - sent := bsmsg.New(false) - sent.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) - sent.AddBlock(block2) - - err = bsnet1.SendMessage(ctx, p2.ID(), sent) - if err != nil { - t.Fatal(err) - } - - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r2.messageReceived: - } - - sender := r2.lastSender - if sender != p1.ID() { - t.Fatal("received message from wrong node") - } - - received := r2.lastMessage - - sentWants := sent.Wantlist() - if len(sentWants) != 1 { - t.Fatal("Did not add want to sent message") - } - sentWant := sentWants[0] - receivedWants := received.Wantlist() - if len(receivedWants) != 1 { - t.Fatal("Did not add want to received message") - } - receivedWant := receivedWants[0] - if receivedWant.Cid != sentWant.Cid || - receivedWant.Priority != sentWant.Priority || - receivedWant.Cancel != sentWant.Cancel { - t.Fatal("Sent message wants did not match received message wants") - } - sentBlocks := sent.Blocks() - if len(sentBlocks) != 1 { - t.Fatal("Did not add block to sent message") - } - sentBlock := sentBlocks[0] - receivedBlocks := received.Blocks() - if len(receivedBlocks) != 1 { - t.Fatal("Did not add response to received message") - } - receivedBlock := receivedBlocks[0] - if receivedBlock.Cid() != sentBlock.Cid() { - t.Fatal("Sent message blocks did not match received message blocks") - } -} - -func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *receiver, p2 tnet.Identity, r2 *receiver) (*ErrHost, bsnet.BitSwapNetwork, *ErrHost, bsnet.BitSwapNetwork, bsmsg.BitSwapMessage) { - // create network - mn := mocknet.New() - defer mn.Close() - mr := mockrouting.NewServer() - 
- // Host 1 - h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) - if err != nil { - t.Fatal(err) - } - eh1 := &ErrHost{Host: h1} - routing1 := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) - bsnet1 := bsnet.NewFromIpfsHost(eh1, routing1) - bsnet1.Start(r1) - t.Cleanup(bsnet1.Stop) - if r1.listener != nil { - eh1.Network().Notify(r1.listener) - } - - // Host 2 - h2, err := mn.AddPeer(p2.PrivateKey(), p2.Address()) - if err != nil { - t.Fatal(err) - } - eh2 := &ErrHost{Host: h2} - routing2 := mr.ClientWithDatastore(context.TODO(), p2, ds.NewMapDatastore()) - bsnet2 := bsnet.NewFromIpfsHost(eh2, routing2) - bsnet2.Start(r2) - t.Cleanup(bsnet2.Stop) - if r2.listener != nil { - eh2.Network().Notify(r2.listener) - } - - // Networking - err = mn.LinkAll() - if err != nil { - t.Fatal(err) - } - err = bsnet1.ConnectTo(ctx, p2.ID()) - if err != nil { - t.Fatal(err) - } - isConnected := <-r1.connectionEvent - if !isConnected { - t.Fatal("Expected connect event") - } - - err = bsnet2.ConnectTo(ctx, p1.ID()) - if err != nil { - t.Fatal(err) - } - - blockGenerator := blocksutil.NewBlockGenerator() - block1 := blockGenerator.Next() - msg := bsmsg.New(false) - msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) - - return eh1, bsnet1, eh2, bsnet2, msg -} - -func TestMessageResendAfterError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - p1 := tnet.RandIdentityOrFatal(t) - r1 := newReceiver() - p2 := tnet.RandIdentityOrFatal(t) - r2 := newReceiver() - - eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) - - testSendErrorBackoff := 100 * time.Millisecond - ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ - MaxRetries: 3, - SendTimeout: 100 * time.Millisecond, - SendErrorBackoff: testSendErrorBackoff, - }) - if err != nil { - t.Fatal(err) - } - defer ms.Close() - - // Return an error from the networking layer the next time we try to send - // a message - eh.setError(errMockNetErr) - - go func() { - time.Sleep(testSendErrorBackoff / 2) - // Stop throwing errors so that the following attempt to send succeeds - eh.setError(nil) - }() - - // Send message with retries, first one should fail, then subsequent - // message should succeed - err = ms.SendMsg(ctx, msg) - if err != nil { - t.Fatal(err) - } - - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r2.messageReceived: - } -} - -func TestMessageSendTimeout(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - p1 := tnet.RandIdentityOrFatal(t) - r1 := newReceiver() - p2 := tnet.RandIdentityOrFatal(t) - r2 := newReceiver() - - eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) - - ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ - MaxRetries: 3, - SendTimeout: 100 * time.Millisecond, - SendErrorBackoff: 100 * time.Millisecond, - }) - if err != nil { - t.Fatal(err) - } - defer ms.Close() - - // Return a DeadlineExceeded error from the networking layer the next time we try to - // send a message - eh.setTimeoutState(true) - - // Send message with retries, all attempts should fail - err = ms.SendMsg(ctx, msg) - if err == nil { - t.Fatal("Expected error from SednMsg") - } - - select { - case <-time.After(500 * time.Millisecond): - t.Fatal("Did not receive disconnect event") - case isConnected := <-r1.connectionEvent: - if isConnected { - t.Fatal("Expected disconnect event (got connect event)") - } - } -} - -func 
TestMessageSendNotSupportedResponse(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - p1 := tnet.RandIdentityOrFatal(t) - r1 := newReceiver() - p2 := tnet.RandIdentityOrFatal(t) - r2 := newReceiver() - - eh, bsnet1, _, _, _ := prepareNetwork(t, ctx, p1, r1, p2, r2) - - eh.setError(multistream.ErrNotSupported) - ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ - MaxRetries: 3, - SendTimeout: 100 * time.Millisecond, - SendErrorBackoff: 100 * time.Millisecond, - }) - if err == nil { - ms.Close() - t.Fatal("Expected ErrNotSupported") - } - - select { - case <-time.After(500 * time.Millisecond): - t.Fatal("Did not receive disconnect event") - case isConnected := <-r1.connectionEvent: - if isConnected { - t.Fatal("Expected disconnect event (got connect event)") - } - } -} - -func TestSupportsHave(t *testing.T) { - ctx := context.Background() - mn := mocknet.New() - defer mn.Close() - mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) - if err != nil { - t.Fatalf("Unable to setup network: %s", err) - } - - type testCase struct { - proto protocol.ID - expSupportsHave bool - } - - testCases := []testCase{ - {bsnet.ProtocolBitswap, true}, - {bsnet.ProtocolBitswapOneOne, false}, - {bsnet.ProtocolBitswapOneZero, false}, - {bsnet.ProtocolBitswapNoVers, false}, - } - - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s-%v", tc.proto, tc.expSupportsHave), func(t *testing.T) { - p1 := tnet.RandIdentityOrFatal(t) - bsnet1 := streamNet.Adapter(p1) - bsnet1.Start(newReceiver()) - t.Cleanup(bsnet1.Stop) - - p2 := tnet.RandIdentityOrFatal(t) - bsnet2 := streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) - bsnet2.Start(newReceiver()) - t.Cleanup(bsnet2.Stop) - - err = mn.LinkAll() - if err != nil { - t.Fatal(err) - } - - senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) - if err != nil { - t.Fatal(err) - } - defer senderCurrent.Close() - - if senderCurrent.SupportsHave() != tc.expSupportsHave { - t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) - } - }) - } -} - -func testNetworkCounters(t *testing.T, n1 int, n2 int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - p1 := tnet.RandIdentityOrFatal(t) - r1 := newReceiver() - p2 := tnet.RandIdentityOrFatal(t) - r2 := newReceiver() - - h1, bsnet1, h2, bsnet2, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) - - for n := 0; n < n1; n++ { - ctx, cancel := context.WithTimeout(ctx, time.Second) - err := bsnet1.SendMessage(ctx, p2.ID(), msg) - if err != nil { - t.Fatal(err) - } - select { - case <-ctx.Done(): - t.Fatal("p2 did not receive message sent") - case <-r2.messageReceived: - for j := 0; j < 2; j++ { - err := bsnet2.SendMessage(ctx, p1.ID(), msg) - if err != nil { - t.Fatal(err) - } - select { - case <-ctx.Done(): - t.Fatal("p1 did not receive message sent") - case <-r1.messageReceived: - } - } - } - cancel() - } - - if n2 > 0 { - ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) - if err != nil { - t.Fatal(err) - } - defer ms.Close() - for n := 0; n < n2; n++ { - ctx, cancel := context.WithTimeout(ctx, time.Second) - err = ms.SendMsg(ctx, msg) - if err != nil { - t.Fatal(err) - } - select { - case <-ctx.Done(): - t.Fatal("p2 did not receive message sent") - case <-r2.messageReceived: - for j := 0; j < 2; j++ { - err := bsnet2.SendMessage(ctx, p1.ID(), msg) - if err != nil { - t.Fatal(err) - } - select { - 
case <-ctx.Done(): - t.Fatal("p1 did not receive message sent") - case <-r1.messageReceived: - } - } - } - cancel() - } - ms.Close() - } - - // Wait until all streams are closed and MessagesRecvd counters - // updated. - ctxto, cancelto := context.WithTimeout(ctx, 5*time.Second) - defer cancelto() - ctxwait, cancelwait := context.WithCancel(ctx) - go func() { - // Wait until all streams are closed - throttler := time.NewTicker(time.Millisecond * 5) - defer throttler.Stop() - for { - h1.lk.Lock() - var done bool - for _, s := range h1.streams { - s.lk.Lock() - closed := s.closed - closed = closed || s.err != nil - s.lk.Unlock() - if closed { - continue - } - pid := s.Protocol() - for _, v := range internal.DefaultProtocols { - if pid == v { - goto ElseH1 - } - } - } - done = true - ElseH1: - h1.lk.Unlock() - if done { - break - } - select { - case <-ctxto.Done(): - return - case <-throttler.C: - } - } - - for { - h2.lk.Lock() - var done bool - for _, s := range h2.streams { - s.lk.Lock() - closed := s.closed - closed = closed || s.err != nil - s.lk.Unlock() - if closed { - continue - } - pid := s.Protocol() - for _, v := range internal.DefaultProtocols { - if pid == v { - goto ElseH2 - } - } - } - done = true - ElseH2: - h2.lk.Unlock() - if done { - break - } - select { - case <-ctxto.Done(): - return - case <-throttler.C: - } - } - - cancelwait() - }() - - select { - case <-ctxto.Done(): - t.Fatal("network streams closing timed out") - case <-ctxwait.Done(): - } - - if bsnet1.Stats().MessagesSent != uint64(n1+n2) { - t.Fatal(fmt.Errorf("expected %d sent messages, got %d", n1+n2, bsnet1.Stats().MessagesSent)) - } - - if bsnet2.Stats().MessagesRecvd != uint64(n1+n2) { - t.Fatal(fmt.Errorf("expected %d received messages, got %d", n1+n2, bsnet2.Stats().MessagesRecvd)) - } - - if bsnet1.Stats().MessagesRecvd != 2*uint64(n1+n2) { - t.Fatal(fmt.Errorf("expected %d received reply messages, got %d", 2*(n1+n2), bsnet1.Stats().MessagesRecvd)) - } -} - -func TestNetworkCounters(t *testing.T) { - for n := 0; n < 11; n++ { - testNetworkCounters(t, 10-n, n) - } -} diff --git a/network/ipfs_impl_timeout_test.go b/network/ipfs_impl_timeout_test.go deleted file mode 100644 index fdbe8e95..00000000 --- a/network/ipfs_impl_timeout_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package network - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestSendTimeout(t *testing.T) { - require.Equal(t, minSendTimeout, sendTimeout(0)) - require.Equal(t, maxSendTimeout, sendTimeout(1<<30)) - - // Check a 1MiB block (very large) - oneMiB := uint64(1 << 20) - hundredKbit := uint64(100 * 1000) - hundredKB := hundredKbit / 8 - expectedTime := sendLatency + time.Duration(oneMiB*uint64(time.Second)/hundredKB) - actualTime := sendTimeout(int(oneMiB)) - require.Equal(t, expectedTime, actualTime) - - // Check a 256KiB block (expected) - require.InDelta(t, 25*time.Second, sendTimeout(256<<10), float64(5*time.Second)) -} diff --git a/network/options.go b/network/options.go index 10d02e5e..d597c1ea 100644 --- a/network/options.go +++ b/network/options.go @@ -1,22 +1,22 @@ package network -import "github.com/libp2p/go-libp2p/core/protocol" +import ( + libipfs "github.com/ipfs/go-libipfs/bitswap/network" + "github.com/libp2p/go-libp2p/core/protocol" +) -type NetOpt func(*Settings) +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.NetOpt instead +type NetOpt = libipfs.NetOpt -type Settings struct { - ProtocolPrefix protocol.ID - SupportedProtocols []protocol.ID -} +// Deprecated: use 
github.com/ipfs/go-libipfs/bitswap/network.Settings instead +type Settings = libipfs.Settings +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.Prefix instead func Prefix(prefix protocol.ID) NetOpt { - return func(settings *Settings) { - settings.ProtocolPrefix = prefix - } + return libipfs.Prefix(prefix) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/network.SupportedProtocols instead func SupportedProtocols(protos []protocol.ID) NetOpt { - return func(settings *Settings) { - settings.SupportedProtocols = protos - } + return libipfs.SupportedProtocols(protos) } diff --git a/options.go b/options.go index 6a1b5913..0ac8e4b3 100644 --- a/options.go +++ b/options.go @@ -3,77 +3,83 @@ package bitswap import ( "time" - "github.com/ipfs/go-bitswap/client" "github.com/ipfs/go-bitswap/server" "github.com/ipfs/go-bitswap/tracer" delay "github.com/ipfs/go-ipfs-delay" + libipfs "github.com/ipfs/go-libipfs/bitswap" ) -type option func(*Bitswap) - // Option is interface{} of server.Option or client.Option or func(*Bitswap) // wrapped in a struct to gain strong type checking. -type Option struct { - v interface{} -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.Option instead +type Option = libipfs.Option +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.EngineBlockstoreWorkerCount instead func EngineBlockstoreWorkerCount(count int) Option { - return Option{server.EngineBlockstoreWorkerCount(count)} + return libipfs.EngineBlockstoreWorkerCount(count) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.EngineTaskWorkerCount instead func EngineTaskWorkerCount(count int) Option { - return Option{server.EngineTaskWorkerCount(count)} + return libipfs.EngineTaskWorkerCount(count) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.MaxOutstandingBytesPerPeer instead func MaxOutstandingBytesPerPeer(count int) Option { - return Option{server.MaxOutstandingBytesPerPeer(count)} + return libipfs.MaxOutstandingBytesPerPeer(count) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.TaskWorkerCount instead func TaskWorkerCount(count int) Option { - return Option{server.TaskWorkerCount(count)} + return libipfs.TaskWorkerCount(count) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.ProvideEnabled instead func ProvideEnabled(enabled bool) Option { - return Option{server.ProvideEnabled(enabled)} + return libipfs.ProvideEnabled(enabled) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.SetSendDontHaves instead func SetSendDontHaves(send bool) Option { - return Option{server.SetSendDontHaves(send)} + return libipfs.SetSendDontHaves(send) } -func WithPeerBlockRequestFilter(pbrf server.PeerBlockRequestFilter) Option { - return Option{server.WithPeerBlockRequestFilter(pbrf)} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.WithPeerBlockRequestFilter instead +func WithPeerBlockRequestFilter(pbrf libipfs.PeerBlockRequestFilter) Option { + return libipfs.WithPeerBlockRequestFilter(pbrf) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.WithScoreLedger instead func WithScoreLedger(scoreLedger server.ScoreLedger) Option { - return Option{server.WithScoreLedger(scoreLedger)} + return libipfs.WithScoreLedger(scoreLedger) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.WithTargetMessageSize instead func WithTargetMessageSize(tms int) Option { - return Option{server.WithTargetMessageSize(tms)} + return libipfs.WithTargetMessageSize(tms) } -func WithTaskComparator(comparator server.TaskComparator) Option { - return 
Option{server.WithTaskComparator(comparator)} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.WithTaskComparator instead +func WithTaskComparator(comparator libipfs.TaskComparator) Option { + return libipfs.WithTaskComparator(comparator) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.ProviderSearchDelay instead func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { - return Option{client.ProviderSearchDelay(newProvSearchDelay)} + return libipfs.ProviderSearchDelay(newProvSearchDelay) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.RebroadcastDelay instead func RebroadcastDelay(newRebroadcastDelay delay.D) Option { - return Option{client.RebroadcastDelay(newRebroadcastDelay)} + return libipfs.RebroadcastDelay(newRebroadcastDelay) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.SetSimulateDontHavesOnTimeout instead func SetSimulateDontHavesOnTimeout(send bool) Option { - return Option{client.SetSimulateDontHavesOnTimeout(send)} + return libipfs.SetSimulateDontHavesOnTimeout(send) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap.WithTracer instead func WithTracer(tap tracer.Tracer) Option { - // Only trace the server, both receive the same messages anyway - return Option{ - option(func(bs *Bitswap) { - bs.tracer = tap - }), - } + return libipfs.WithTracer(tap) } diff --git a/sendOnlyTracer.go b/sendOnlyTracer.go deleted file mode 100644 index d01d3148..00000000 --- a/sendOnlyTracer.go +++ /dev/null @@ -1,20 +0,0 @@ -package bitswap - -import ( - "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/tracer" - "github.com/libp2p/go-libp2p/core/peer" -) - -type sendOnlyTracer interface { - MessageSent(peer.ID, message.BitSwapMessage) -} - -var _ tracer.Tracer = nopReceiveTracer{} - -// we need to only trace sends because we already trace receives in the polyfill object (to not get them traced twice) -type nopReceiveTracer struct { - sendOnlyTracer -} - -func (nopReceiveTracer) MessageReceived(peer.ID, message.BitSwapMessage) {} diff --git a/server/forward.go b/server/forward.go index 79c39d5d..0616097b 100644 --- a/server/forward.go +++ b/server/forward.go @@ -1,14 +1,20 @@ package server import ( - "github.com/ipfs/go-bitswap/server/internal/decision" + libipfs "github.com/ipfs/go-libipfs/bitswap/server" ) type ( - Receipt = decision.Receipt - PeerBlockRequestFilter = decision.PeerBlockRequestFilter - TaskComparator = decision.TaskComparator - TaskInfo = decision.TaskInfo - ScoreLedger = decision.ScoreLedger - ScorePeerFunc = decision.ScorePeerFunc + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.Receipt instead + Receipt = libipfs.Receipt + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.PeerBlockRequestFilter instead + PeerBlockRequestFilter = libipfs.PeerBlockRequestFilter + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.TaskComparator instead + TaskComparator = libipfs.TaskComparator + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.TaskInfo instead + TaskInfo = libipfs.TaskInfo + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.ScoreLedger instead + ScoreLedger = libipfs.ScoreLedger + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.ScorePeerFunc instead + ScorePeerFunc = libipfs.ScorePeerFunc ) diff --git a/server/internal/decision/blockstoremanager.go b/server/internal/decision/blockstoremanager.go deleted file mode 100644 index 01eae5a3..00000000 --- a/server/internal/decision/blockstoremanager.go +++ /dev/null @@ -1,149 +0,0 @@ -package 
decision - -import ( - "context" - "fmt" - "sync" - - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - bstore "github.com/ipfs/go-ipfs-blockstore" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-metrics-interface" -) - -// blockstoreManager maintains a pool of workers that make requests to the blockstore. -type blockstoreManager struct { - bs bstore.Blockstore - workerCount int - jobs chan func() - pendingGauge metrics.Gauge - activeGauge metrics.Gauge - - workerWG sync.WaitGroup - stopChan chan struct{} - stopOnce sync.Once -} - -// newBlockstoreManager creates a new blockstoreManager with the given context -// and number of workers -func newBlockstoreManager( - bs bstore.Blockstore, - workerCount int, - pendingGauge metrics.Gauge, - activeGauge metrics.Gauge, -) *blockstoreManager { - return &blockstoreManager{ - bs: bs, - workerCount: workerCount, - jobs: make(chan func()), - pendingGauge: pendingGauge, - activeGauge: activeGauge, - stopChan: make(chan struct{}), - } -} - -func (bsm *blockstoreManager) start() { - bsm.workerWG.Add(bsm.workerCount) - for i := 0; i < bsm.workerCount; i++ { - go bsm.worker() - } -} - -func (bsm *blockstoreManager) stop() { - bsm.stopOnce.Do(func() { - close(bsm.stopChan) - }) - bsm.workerWG.Wait() -} - -func (bsm *blockstoreManager) worker() { - defer bsm.workerWG.Done() - for { - select { - case <-bsm.stopChan: - return - case job := <-bsm.jobs: - bsm.pendingGauge.Dec() - bsm.activeGauge.Inc() - job() - bsm.activeGauge.Dec() - } - } -} - -func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-bsm.stopChan: - return fmt.Errorf("shutting down") - case bsm.jobs <- job: - bsm.pendingGauge.Inc() - return nil - } -} - -func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) (map[cid.Cid]int, error) { - res := make(map[cid.Cid]int) - if len(ks) == 0 { - return res, nil - } - - var lk sync.Mutex - return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { - size, err := bsm.bs.GetSize(ctx, c) - if err != nil { - if !ipld.IsNotFound(err) { - // Note: this isn't a fatal error. We shouldn't abort the request - log.Errorf("blockstore.GetSize(%s) error: %s", c, err) - } - } else { - lk.Lock() - res[c] = size - lk.Unlock() - } - }) -} - -func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) { - res := make(map[cid.Cid]blocks.Block, len(ks)) - if len(ks) == 0 { - return res, nil - } - - var lk sync.Mutex - return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { - blk, err := bsm.bs.Get(ctx, c) - if err != nil { - if !ipld.IsNotFound(err) { - // Note: this isn't a fatal error. 
We shouldn't abort the request - log.Errorf("blockstore.Get(%s) error: %s", c, err) - } - return - } - - lk.Lock() - res[c] = blk - lk.Unlock() - }) -} - -func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { - var err error - var wg sync.WaitGroup - for _, k := range ks { - c := k - wg.Add(1) - err = bsm.addJob(ctx, func() { - jobFn(c) - wg.Done() - }) - if err != nil { - wg.Done() - break - } - } - wg.Wait() - return err -} diff --git a/server/internal/decision/blockstoremanager_test.go b/server/internal/decision/blockstoremanager_test.go deleted file mode 100644 index d1c15027..00000000 --- a/server/internal/decision/blockstoremanager_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package decision - -import ( - "context" - "crypto/rand" - "sync" - "testing" - "time" - - "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" - "github.com/ipfs/go-metrics-interface" - - blocks "github.com/ipfs/go-block-format" - ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/delayed" - ds_sync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - delay "github.com/ipfs/go-ipfs-delay" -) - -func newBlockstoreManagerForTesting( - t *testing.T, - ctx context.Context, - bs blockstore.Blockstore, - workerCount int, -) *blockstoreManager { - testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() - testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() - bsm := newBlockstoreManager(bs, workerCount, testPendingBlocksGauge, testActiveBlocksGauge) - bsm.start() - t.Cleanup(bsm.stop) - return bsm -} - -func TestBlockstoreManagerNotFoundKey(t *testing.T) { - ctx := context.Background() - bsdelay := delay.Fixed(3 * time.Millisecond) - dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - - bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) - - cids := testutil.GenerateCids(4) - sizes, err := bsm.getBlockSizes(ctx, cids) - if err != nil { - t.Fatal(err) - } - if len(sizes) != 0 { - t.Fatal("Wrong response length") - } - - for _, c := range cids { - if _, ok := sizes[c]; ok { - t.Fatal("Non-existent block should have no size") - } - } - - blks, err := bsm.getBlocks(ctx, cids) - if err != nil { - t.Fatal(err) - } - if len(blks) != 0 { - t.Fatal("Wrong response length") - } - - for _, c := range cids { - if _, ok := blks[c]; ok { - t.Fatal("Non-existent block should have no size") - } - } -} - -func TestBlockstoreManager(t *testing.T) { - ctx := context.Background() - bsdelay := delay.Fixed(3 * time.Millisecond) - dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - - bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) - - exp := make(map[cid.Cid]blocks.Block) - var blks []blocks.Block - for i := 0; i < 32; i++ { - buf := make([]byte, 1024*(i+1)) - _, _ = rand.Read(buf) - b := blocks.NewBlock(buf) - blks = append(blks, b) - exp[b.Cid()] = b - } - - // Put all blocks in the blockstore except the last one - if err := bstore.PutMany(ctx, blks[:len(blks)-1]); err != nil { - t.Fatal(err) - } - - var cids []cid.Cid - for _, b := range blks { - cids = append(cids, b.Cid()) - } - - sizes, err := bsm.getBlockSizes(ctx, cids) - if err != nil { - t.Fatal(err) - } - if len(sizes) != len(blks)-1 { - 
t.Fatal("Wrong response length") - } - - for _, c := range cids { - expSize := len(exp[c].RawData()) - size, ok := sizes[c] - - // Only the last key should be missing - if c.Equals(cids[len(cids)-1]) { - if ok { - t.Fatal("Non-existent block should not be in sizes map") - } - } else { - if !ok { - t.Fatal("Block should be in sizes map") - } - if size != expSize { - t.Fatal("Block has wrong size") - } - } - } - - fetched, err := bsm.getBlocks(ctx, cids) - if err != nil { - t.Fatal(err) - } - if len(fetched) != len(blks)-1 { - t.Fatal("Wrong response length") - } - - for _, c := range cids { - blk, ok := fetched[c] - - // Only the last key should be missing - if c.Equals(cids[len(cids)-1]) { - if ok { - t.Fatal("Non-existent block should not be in blocks map") - } - } else { - if !ok { - t.Fatal("Block should be in blocks map") - } - if !blk.Cid().Equals(c) { - t.Fatal("Block has wrong cid") - } - } - } -} - -func TestBlockstoreManagerConcurrency(t *testing.T) { - ctx := context.Background() - bsdelay := delay.Fixed(3 * time.Millisecond) - dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - - workerCount := 5 - bsm := newBlockstoreManagerForTesting(t, ctx, bstore, workerCount) - - blkSize := int64(8 * 1024) - blks := testutil.GenerateBlocksOfSize(32, blkSize) - var ks []cid.Cid - for _, b := range blks { - ks = append(ks, b.Cid()) - } - - err := bstore.PutMany(ctx, blks) - if err != nil { - t.Fatal(err) - } - - // Create more concurrent requests than the number of workers - wg := sync.WaitGroup{} - for i := 0; i < 16; i++ { - wg.Add(1) - - go func(t *testing.T) { - defer wg.Done() - - sizes, err := bsm.getBlockSizes(ctx, ks) - if err != nil { - t.Error(err) - } - if len(sizes) != len(blks) { - t.Error("Wrong response length") - } - }(t) - } - wg.Wait() -} - -func TestBlockstoreManagerClose(t *testing.T) { - ctx := context.Background() - delayTime := 20 * time.Millisecond - bsdelay := delay.Fixed(delayTime) - dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - - bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) - - blks := testutil.GenerateBlocksOfSize(10, 1024) - var ks []cid.Cid - for _, b := range blks { - ks = append(ks, b.Cid()) - } - - err := bstore.PutMany(ctx, blks) - if err != nil { - t.Fatal(err) - } - - bsm.stop() - - time.Sleep(5 * time.Millisecond) - - before := time.Now() - _, err = bsm.getBlockSizes(ctx, ks) - if err == nil { - t.Error("expected an error") - } - // would expect to wait delayTime*10 if we didn't cancel. 
- if time.Since(before) > delayTime*2 { - t.Error("expected a fast timeout") - } -} - -func TestBlockstoreManagerCtxDone(t *testing.T) { - delayTime := 20 * time.Millisecond - bsdelay := delay.Fixed(delayTime) - - underlyingDstore := ds_sync.MutexWrap(ds.NewMapDatastore()) - dstore := delayed.New(underlyingDstore, bsdelay) - underlyingBstore := blockstore.NewBlockstore(underlyingDstore) - bstore := blockstore.NewBlockstore(dstore) - - ctx := context.Background() - bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) - - blks := testutil.GenerateBlocksOfSize(100, 128) - var ks []cid.Cid - for _, b := range blks { - ks = append(ks, b.Cid()) - } - - err := underlyingBstore.PutMany(ctx, blks) - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), delayTime/2) - defer cancel() - - before := time.Now() - _, err = bsm.getBlockSizes(ctx, ks) - if err == nil { - t.Error("expected an error") - } - - // would expect to wait delayTime*100/3 if we didn't cancel. - if time.Since(before) > delayTime*10 { - t.Error("expected a fast timeout") - } -} diff --git a/server/internal/decision/engine.go b/server/internal/decision/engine.go deleted file mode 100644 index 5a7df4b7..00000000 --- a/server/internal/decision/engine.go +++ /dev/null @@ -1,1026 +0,0 @@ -// Package decision implements the decision engine for the bitswap service. -package decision - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/google/uuid" - - wl "github.com/ipfs/go-bitswap/client/wantlist" - "github.com/ipfs/go-bitswap/internal/defaults" - bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bmetrics "github.com/ipfs/go-bitswap/metrics" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - bstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" - "github.com/ipfs/go-metrics-interface" - "github.com/ipfs/go-peertaskqueue" - "github.com/ipfs/go-peertaskqueue/peertask" - "github.com/ipfs/go-peertaskqueue/peertracker" - process "github.com/jbenet/goprocess" - "github.com/libp2p/go-libp2p/core/peer" -) - -// TODO consider taking responsibility for other types of requests. For -// example, there could be a |cancelQueue| for all of the cancellation -// messages that need to go out. There could also be a |wantlistQueue| for -// the local peer's wantlists. Alternatively, these could all be bundled -// into a single, intelligent global queue that efficiently -// batches/combines and takes all of these into consideration. -// -// Right now, messages go onto the network for four reasons: -// 1. an initial `sendwantlist` message to a provider of the first key in a -// request -// 2. a periodic full sweep of `sendwantlist` messages to all providers -// 3. upon receipt of blocks, a `cancel` message to all peers -// 4. draining the priority queue of `blockrequests` from peers -// -// Presently, only `blockrequests` are handled by the decision engine. -// However, there is an opportunity to give it more responsibility! If the -// decision engine is given responsibility for all of the others, it can -// intelligently decide how to combine requests efficiently. 
-// -// Some examples of what would be possible: -// -// * when sending out the wantlists, include `cancel` requests -// * when handling `blockrequests`, include `sendwantlist` and `cancel` as -// appropriate -// * when handling `cancel`, if we recently received a wanted block from a -// peer, include a partial wantlist that contains a few other high priority -// blocks -// -// In a sense, if we treat the decision engine as a black box, it could do -// whatever it sees fit to produce desired outcomes (get wanted keys -// quickly, maintain good relationships with peers, etc). - -var log = logging.Logger("engine") - -const ( - // outboxChanBuffer must be 0 to prevent stale messages from being sent - outboxChanBuffer = 0 - // targetMessageSize is the ideal size of the batched payload. We try to - // pop this much data off the request queue, but it may be a little more - // or less depending on what's in the queue. - defaultTargetMessageSize = 16 * 1024 - // tagFormat is the tag given to peers associated an engine - tagFormat = "bs-engine-%s-%s" - - // queuedTagWeight is the default weight for peers that have work queued - // on their behalf. - queuedTagWeight = 10 - - // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in - // bytes up to which we will replace a want-have with a want-block - maxBlockSizeReplaceHasWithBlock = 1024 -) - -// Envelope contains a message for a Peer. -type Envelope struct { - // Peer is the intended recipient. - Peer peer.ID - - // Message is the payload. - Message bsmsg.BitSwapMessage - - // A callback to notify the decision queue that the task is complete - Sent func() -} - -// PeerTagger covers the methods on the connection manager used by the decision -// engine to tag peers -type PeerTagger interface { - TagPeer(peer.ID, string, int) - UntagPeer(p peer.ID, tag string) -} - -// Assigns a specific score to a peer -type ScorePeerFunc func(peer.ID, int) - -// ScoreLedger is an external ledger dealing with peer scores. -type ScoreLedger interface { - // Returns aggregated data communication with a given peer. - GetReceipt(p peer.ID) *Receipt - // Increments the sent counter for the given peer. - AddToSentBytes(p peer.ID, n int) - // Increments the received counter for the given peer. - AddToReceivedBytes(p peer.ID, n int) - // PeerConnected should be called when a new peer connects, - // meaning the ledger should open accounting. - PeerConnected(p peer.ID) - // PeerDisconnected should be called when a peer disconnects to - // clean up the accounting. - PeerDisconnected(p peer.ID) - // Starts the ledger sampling process. - Start(scorePeer ScorePeerFunc) - // Stops the sampling process. - Stop() -} - -// Engine manages sending requested blocks to peers. -type Engine struct { - // peerRequestQueue is a priority queue of requests received from peers. - // Requests are popped from the queue, packaged up, and placed in the - // outbox. - peerRequestQueue *peertaskqueue.PeerTaskQueue - - // FIXME it's a bit odd for the client and the worker to both share memory - // (both modify the peerRequestQueue) and also to communicate over the - // workSignal channel. consider sending requests over the channel and - // allowing the worker to have exclusive access to the peerRequestQueue. In - // that case, no lock would be required. - workSignal chan struct{} - - // outbox contains outgoing messages to peers. 
This is owned by the - // taskWorker goroutine - outbox chan (<-chan *Envelope) - - bsm *blockstoreManager - - peerTagger PeerTagger - - tagQueued, tagUseful string - - lock sync.RWMutex // protects the fields immediately below - - // ledgerMap lists block-related Ledgers by their Partner key. - ledgerMap map[peer.ID]*ledger - - // peerLedger saves which peers are waiting for a Cid - peerLedger *peerLedger - - // an external ledger dealing with peer scores - scoreLedger ScoreLedger - - ticker *time.Ticker - - taskWorkerLock sync.Mutex - taskWorkerCount int - - targetMessageSize int - - // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in - // bytes up to which we will replace a want-have with a want-block - maxBlockSizeReplaceHasWithBlock int - - sendDontHaves bool - - self peer.ID - - // metrics gauge for total pending tasks across all workers - pendingGauge metrics.Gauge - - // metrics gauge for total pending tasks across all workers - activeGauge metrics.Gauge - - // used to ensure metrics are reported each fixed number of operation - metricsLock sync.Mutex - metricUpdateCounter int - - taskComparator TaskComparator - - peerBlockRequestFilter PeerBlockRequestFilter - - bstoreWorkerCount int - maxOutstandingBytesPerPeer int -} - -// TaskInfo represents the details of a request from a peer. -type TaskInfo struct { - Peer peer.ID - // The CID of the block - Cid cid.Cid - // Tasks can be want-have or want-block - IsWantBlock bool - // Whether to immediately send a response if the block is not found - SendDontHave bool - // The size of the block corresponding to the task - BlockSize int - // Whether the block was found - HaveBlock bool -} - -// TaskComparator is used for task prioritization. -// It should return true if task 'ta' has higher priority than task 'tb' -type TaskComparator func(ta, tb *TaskInfo) bool - -// PeerBlockRequestFilter is used to accept / deny requests for a CID coming from a PeerID -// It should return true if the request should be fullfilled. -type PeerBlockRequestFilter func(p peer.ID, c cid.Cid) bool - -type Option func(*Engine) - -func WithTaskComparator(comparator TaskComparator) Option { - return func(e *Engine) { - e.taskComparator = comparator - } -} - -func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { - return func(e *Engine) { - e.peerBlockRequestFilter = pbrf - } -} - -func WithTargetMessageSize(size int) Option { - return func(e *Engine) { - e.targetMessageSize = size - } -} - -func WithScoreLedger(scoreledger ScoreLedger) Option { - return func(e *Engine) { - e.scoreLedger = scoreledger - } -} - -// WithBlockstoreWorkerCount sets the number of worker threads used for -// blockstore operations in the decision engine -func WithBlockstoreWorkerCount(count int) Option { - if count <= 0 { - panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) - } - return func(e *Engine) { - e.bstoreWorkerCount = count - } -} - -// WithTaskWorkerCount sets the number of worker threads used inside the engine -func WithTaskWorkerCount(count int) Option { - if count <= 0 { - panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count)) - } - return func(e *Engine) { - e.taskWorkerCount = count - } -} - -// WithMaxOutstandingBytesPerPeer describes approximately how much work we are will to have outstanding to a peer at any -// given time. Setting it to 0 will disable any limiting. 
-func WithMaxOutstandingBytesPerPeer(count int) Option { - if count < 0 { - panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count)) - } - return func(e *Engine) { - e.maxOutstandingBytesPerPeer = count - } -} - -func WithSetSendDontHave(send bool) Option { - return func(e *Engine) { - e.sendDontHaves = send - } -} - -// wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator -func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { - return func(a, b *peertask.QueueTask) bool { - taskDataA := a.Task.Data.(*taskData) - taskInfoA := &TaskInfo{ - Peer: a.Target, - Cid: a.Task.Topic.(cid.Cid), - IsWantBlock: taskDataA.IsWantBlock, - SendDontHave: taskDataA.SendDontHave, - BlockSize: taskDataA.BlockSize, - HaveBlock: taskDataA.HaveBlock, - } - taskDataB := b.Task.Data.(*taskData) - taskInfoB := &TaskInfo{ - Peer: b.Target, - Cid: b.Task.Topic.(cid.Cid), - IsWantBlock: taskDataB.IsWantBlock, - SendDontHave: taskDataB.SendDontHave, - BlockSize: taskDataB.BlockSize, - HaveBlock: taskDataB.HaveBlock, - } - return tc(taskInfoA, taskInfoB) - } -} - -// NewEngine creates a new block sending engine for the given block store. -// maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer more tasks if it has some maximum -// work already outstanding. -func NewEngine( - ctx context.Context, - bs bstore.Blockstore, - peerTagger PeerTagger, - self peer.ID, - opts ...Option, -) *Engine { - return newEngine( - ctx, - bs, - peerTagger, - self, - maxBlockSizeReplaceHasWithBlock, - opts..., - ) -} - -func newEngine( - ctx context.Context, - bs bstore.Blockstore, - peerTagger PeerTagger, - self peer.ID, - maxReplaceSize int, - opts ...Option, -) *Engine { - e := &Engine{ - ledgerMap: make(map[peer.ID]*ledger), - scoreLedger: NewDefaultScoreLedger(), - bstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, - maxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, - peerTagger: peerTagger, - outbox: make(chan (<-chan *Envelope), outboxChanBuffer), - workSignal: make(chan struct{}, 1), - ticker: time.NewTicker(time.Millisecond * 100), - maxBlockSizeReplaceHasWithBlock: maxReplaceSize, - taskWorkerCount: defaults.BitswapEngineTaskWorkerCount, - sendDontHaves: true, - self: self, - peerLedger: newPeerLedger(), - pendingGauge: bmetrics.PendingEngineGauge(ctx), - activeGauge: bmetrics.ActiveEngineGauge(ctx), - targetMessageSize: defaultTargetMessageSize, - tagQueued: fmt.Sprintf(tagFormat, "queued", uuid.New().String()), - tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), - } - - for _, opt := range opts { - opt(e) - } - - e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(ctx), bmetrics.ActiveBlocksGauge(ctx)) - - // default peer task queue options - peerTaskQueueOpts := []peertaskqueue.Option{ - peertaskqueue.OnPeerAddedHook(e.onPeerAdded), - peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), - peertaskqueue.TaskMerger(newTaskMerger()), - peertaskqueue.IgnoreFreezing(true), - peertaskqueue.MaxOutstandingWorkPerPeer(e.maxOutstandingBytesPerPeer), - } - - if e.taskComparator != nil { - queueTaskComparator := wrapTaskComparator(e.taskComparator) - peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(queueTaskComparator))) - peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(queueTaskComparator)) - } - - e.peerRequestQueue = peertaskqueue.New(peerTaskQueueOpts...) 
- - return e -} - -func (e *Engine) updateMetrics() { - e.metricsLock.Lock() - c := e.metricUpdateCounter - e.metricUpdateCounter++ - e.metricsLock.Unlock() - - if c%100 == 0 { - stats := e.peerRequestQueue.Stats() - e.activeGauge.Set(float64(stats.NumActive)) - e.pendingGauge.Set(float64(stats.NumPending)) - } -} - -// SetSendDontHaves indicates what to do when the engine receives a want-block -// for a block that is not in the blockstore. Either -// - Send a DONT_HAVE message -// - Simply don't respond -// Older versions of Bitswap did not respond, so this allows us to simulate -// those older versions for testing. -func (e *Engine) SetSendDontHaves(send bool) { - e.sendDontHaves = send -} - -// Starts the score ledger. Before start the function checks and, -// if it is unset, initializes the scoreLedger with the default -// implementation. -func (e *Engine) startScoreLedger(px process.Process) { - e.scoreLedger.Start(func(p peer.ID, score int) { - if score == 0 { - e.peerTagger.UntagPeer(p, e.tagUseful) - } else { - e.peerTagger.TagPeer(p, e.tagUseful, score) - } - }) - px.Go(func(ppx process.Process) { - <-ppx.Closing() - e.scoreLedger.Stop() - }) -} - -func (e *Engine) startBlockstoreManager(px process.Process) { - e.bsm.start() - px.Go(func(ppx process.Process) { - <-ppx.Closing() - e.bsm.stop() - }) -} - -// Start up workers to handle requests from other nodes for the data on this node -func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { - e.startBlockstoreManager(px) - e.startScoreLedger(px) - - e.taskWorkerLock.Lock() - defer e.taskWorkerLock.Unlock() - - for i := 0; i < e.taskWorkerCount; i++ { - px.Go(func(_ process.Process) { - e.taskWorker(ctx) - }) - } - -} - -func (e *Engine) onPeerAdded(p peer.ID) { - e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) -} - -func (e *Engine) onPeerRemoved(p peer.ID) { - e.peerTagger.UntagPeer(p, e.tagQueued) -} - -// WantlistForPeer returns the list of keys that the given peer has asked for -func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { - partner := e.findOrCreate(p) - - partner.lk.Lock() - entries := partner.wantList.Entries() - partner.lk.Unlock() - - return entries -} - -// LedgerForPeer returns aggregated data communication with a given peer. -func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { - return e.scoreLedger.GetReceipt(p) -} - -// Each taskWorker pulls items off the request queue up to the maximum size -// and adds them to an envelope that is passed off to the bitswap workers, -// which send the message to the network. -func (e *Engine) taskWorker(ctx context.Context) { - defer e.taskWorkerExit() - for { - oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking - select { - case <-ctx.Done(): - return - case e.outbox <- oneTimeUse: - } - // receiver is ready for an outoing envelope. let's prepare one. first, - // we must acquire a task from the PQ... - envelope, err := e.nextEnvelope(ctx) - if err != nil { - close(oneTimeUse) - return // ctx cancelled - } - oneTimeUse <- envelope // buffered. won't block - close(oneTimeUse) - } -} - -// taskWorkerExit handles cleanup of task workers -func (e *Engine) taskWorkerExit() { - e.taskWorkerLock.Lock() - defer e.taskWorkerLock.Unlock() - - e.taskWorkerCount-- - if e.taskWorkerCount == 0 { - close(e.outbox) - } -} - -// nextEnvelope runs in the taskWorker goroutine. Returns an error if the -// context is cancelled before the next Envelope can be created. 
-func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { - for { - // Pop some tasks off the request queue - p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(e.targetMessageSize) - e.updateMetrics() - for len(nextTasks) == 0 { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-e.workSignal: - p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) - e.updateMetrics() - case <-e.ticker.C: - // When a task is cancelled, the queue may be "frozen" for a - // period of time. We periodically "thaw" the queue to make - // sure it doesn't get stuck in a frozen state. - e.peerRequestQueue.ThawRound() - p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) - e.updateMetrics() - } - } - - // Create a new message - msg := bsmsg.New(false) - - log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) - - // Amount of data in the request queue still waiting to be popped - msg.SetPendingBytes(int32(pendingBytes)) - - // Split out want-blocks, want-haves and DONT_HAVEs - blockCids := make([]cid.Cid, 0, len(nextTasks)) - blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) - for _, t := range nextTasks { - c := t.Topic.(cid.Cid) - td := t.Data.(*taskData) - if td.HaveBlock { - if td.IsWantBlock { - blockCids = append(blockCids, c) - blockTasks[c] = td - } else { - // Add HAVES to the message - msg.AddHave(c) - } - } else { - // Add DONT_HAVEs to the message - msg.AddDontHave(c) - } - } - - // Fetch blocks from datastore - blks, err := e.bsm.getBlocks(ctx, blockCids) - if err != nil { - // we're dropping the envelope but that's not an issue in practice. - return nil, err - } - - for c, t := range blockTasks { - blk := blks[c] - // If the block was not found (it has been removed) - if blk == nil { - // If the client requested DONT_HAVE, add DONT_HAVE to the message - if t.SendDontHave { - msg.AddDontHave(c) - } - } else { - // Add the block to the message - // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", e.self, p, c, len(blk.RawData())) - msg.AddBlock(blk) - } - } - - // If there's nothing in the message, bail out - if msg.Empty() { - e.peerRequestQueue.TasksDone(p, nextTasks...) - continue - } - - log.Debugw("Bitswap engine -> msg", "local", e.self, "to", p, "blockCount", len(msg.Blocks()), "presenceCount", len(msg.BlockPresences()), "size", msg.Size()) - return &Envelope{ - Peer: p, - Message: msg, - Sent: func() { - // Once the message has been sent, signal the request queue so - // it can be cleared from the queue - e.peerRequestQueue.TasksDone(p, nextTasks...) - - // Signal the worker to check for more work - e.signalNewWork() - }, - }, nil - } -} - -// Outbox returns a channel of one-time use Envelope channels. -func (e *Engine) Outbox() <-chan (<-chan *Envelope) { - return e.outbox -} - -// Peers returns a slice of Peers with whom the local node has active sessions. -func (e *Engine) Peers() []peer.ID { - e.lock.RLock() - defer e.lock.RUnlock() - - response := make([]peer.ID, 0, len(e.ledgerMap)) - - for _, ledger := range e.ledgerMap { - response = append(response, ledger.Partner) - } - return response -} - -// MessageReceived is called when a message is received from a remote peer. 
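The outbox is a channel of one-shot envelope channels: each receive hands back a channel that delivers exactly one prepared Envelope, or is closed if the engine shuts down first. A minimal consumer sketch, assuming a sendToNetwork helper that performs the actual wire send:

    for nextEnvelope := range e.Outbox() {
        env, ok := <-nextEnvelope // blocks until the engine has prepared an envelope
        if !ok {
            continue // closed without an envelope: the engine is shutting down
        }
        sendToNetwork(env.Peer, env.Message) // hypothetical network send
        env.Sent()                           // marks the tasks done and signals the engine to look for more work
    }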
-// For each item in the wantlist, add a want-have or want-block entry to the -// request queue (this is later popped off by the workerTasks) -func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { - entries := m.Wantlist() - - if len(entries) > 0 { - log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) - for _, et := range entries { - if !et.Cancel { - if et.WantType == pb.Message_Wantlist_Have { - log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid) - } else { - log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid) - } - } - } - } - - if m.Empty() { - log.Infof("received empty message from %s", p) - } - - newWorkExists := false - defer func() { - if newWorkExists { - e.signalNewWork() - } - }() - - // Dispatch entries - wants, cancels := e.splitWantsCancels(entries) - wants, denials := e.splitWantsDenials(p, wants) - - // Get block sizes - wantKs := cid.NewSet() - for _, entry := range wants { - wantKs.Add(entry.Cid) - } - blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) - if err != nil { - log.Info("aborting message processing", err) - return - } - - e.lock.Lock() - for _, entry := range wants { - e.peerLedger.Wants(p, entry.Cid) - } - for _, entry := range cancels { - e.peerLedger.CancelWant(p, entry.Cid) - } - e.lock.Unlock() - - // Get the ledger for the peer - l := e.findOrCreate(p) - l.lk.Lock() - defer l.lk.Unlock() - - // If the peer sent a full wantlist, replace the ledger's wantlist - if m.Full() { - l.wantList = wl.New() - } - - var activeEntries []peertask.Task - - // Remove cancelled blocks from the queue - for _, entry := range cancels { - log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid) - if l.CancelWant(entry.Cid) { - e.peerRequestQueue.Remove(entry.Cid, p) - } - } - - // Cancel a block operation - sendDontHave := func(entry bsmsg.Entry) { - // Only add the task to the queue if the requester wants a DONT_HAVE - if e.sendDontHaves && entry.SendDontHave { - c := entry.Cid - - newWorkExists = true - isWantBlock := false - if entry.WantType == pb.Message_Wantlist_Block { - isWantBlock = true - } - - activeEntries = append(activeEntries, peertask.Task{ - Topic: c, - Priority: int(entry.Priority), - Work: bsmsg.BlockPresenceSize(c), - Data: &taskData{ - BlockSize: 0, - HaveBlock: false, - IsWantBlock: isWantBlock, - SendDontHave: entry.SendDontHave, - }, - }) - } - } - - // Deny access to blocks - for _, entry := range denials { - log.Debugw("Bitswap engine: block denied access", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) - sendDontHave(entry) - } - - // For each want-have / want-block - for _, entry := range wants { - c := entry.Cid - blockSize, found := blockSizes[entry.Cid] - - // Add each want-have / want-block to the ledger - l.Wants(c, entry.Priority, entry.WantType) - - // If the block was not found - if !found { - log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) - sendDontHave(entry) - } else { - // The block was found, add it to the queue - newWorkExists = true - - isWantBlock := e.sendAsBlock(entry.WantType, blockSize) - - log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock) - - // entrySize is the amount of space the entry takes up in the - // message we send to the recipient. 
If we're sending a block, the - // entrySize is the size of the block. Otherwise it's the size of - // a block presence entry. - entrySize := blockSize - if !isWantBlock { - entrySize = bsmsg.BlockPresenceSize(c) - } - activeEntries = append(activeEntries, peertask.Task{ - Topic: c, - Priority: int(entry.Priority), - Work: entrySize, - Data: &taskData{ - BlockSize: blockSize, - HaveBlock: true, - IsWantBlock: isWantBlock, - SendDontHave: entry.SendDontHave, - }, - }) - } - } - - // Push entries onto the request queue - if len(activeEntries) > 0 { - e.peerRequestQueue.PushTasks(p, activeEntries...) - e.updateMetrics() - } -} - -// Split the want-have / want-block entries from the cancel entries -func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { - wants := make([]bsmsg.Entry, 0, len(es)) - cancels := make([]bsmsg.Entry, 0, len(es)) - for _, et := range es { - if et.Cancel { - cancels = append(cancels, et) - } else { - wants = append(wants, et) - } - } - return wants, cancels -} - -// Split the want-have / want-block entries from the block that will be denied access -func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { - if e.peerBlockRequestFilter == nil { - return allWants, nil - } - - wants := make([]bsmsg.Entry, 0, len(allWants)) - denied := make([]bsmsg.Entry, 0, len(allWants)) - - for _, et := range allWants { - if e.peerBlockRequestFilter(p, et.Cid) { - wants = append(wants, et) - } else { - denied = append(denied, et) - } - } - - return wants, denied -} - -// ReceivedBlocks is called when new blocks are received from the network. -// This function also updates the receive side of the ledger. -func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) { - if len(blks) == 0 { - return - } - - l := e.findOrCreate(from) - - // Record how many bytes were received in the ledger - l.lk.Lock() - defer l.lk.Unlock() - for _, blk := range blks { - log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) - e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) - } -} - -// NotifyNewBlocks is called when new blocks becomes available locally, and in particular when the caller of bitswap -// decide to store those blocks and make them available on the network. -func (e *Engine) NotifyNewBlocks(blks []blocks.Block) { - if len(blks) == 0 { - return - } - - // Get the size of each block - blockSizes := make(map[cid.Cid]int, len(blks)) - for _, blk := range blks { - blockSizes[blk.Cid()] = len(blk.RawData()) - } - - // Check each peer to see if it wants one of the blocks we received - var work bool - missingWants := make(map[peer.ID][]cid.Cid) - for _, b := range blks { - k := b.Cid() - - e.lock.RLock() - peers := e.peerLedger.Peers(k) - e.lock.RUnlock() - - for _, p := range peers { - e.lock.RLock() - ledger, ok := e.ledgerMap[p] - e.lock.RUnlock() - - if !ok { - // This can happen if the peer has disconnected while we're processing this list. - log.Debugw("failed to find peer in ledger", "peer", p) - missingWants[p] = append(missingWants[p], k) - continue - } - ledger.lk.RLock() - entry, ok := ledger.WantListContains(k) - ledger.lk.RUnlock() - if !ok { - // This can happen if the peer has canceled their want while we're processing this message. 
- log.Debugw("wantlist index doesn't match peer's wantlist", "peer", p) - missingWants[p] = append(missingWants[p], k) - continue - } - work = true - - blockSize := blockSizes[k] - isWantBlock := e.sendAsBlock(entry.WantType, blockSize) - - entrySize := blockSize - if !isWantBlock { - entrySize = bsmsg.BlockPresenceSize(k) - } - - e.peerRequestQueue.PushTasks(p, peertask.Task{ - Topic: entry.Cid, - Priority: int(entry.Priority), - Work: entrySize, - Data: &taskData{ - BlockSize: blockSize, - HaveBlock: true, - IsWantBlock: isWantBlock, - SendDontHave: false, - }, - }) - e.updateMetrics() - } - } - - // If we found missing wants (e.g., because the peer disconnected, we have some races here) - // remove them from the list. Unfortunately, we still have to re-check because the user - // could have re-connected in the meantime. - if len(missingWants) > 0 { - e.lock.Lock() - for p, wl := range missingWants { - if ledger, ok := e.ledgerMap[p]; ok { - ledger.lk.RLock() - for _, k := range wl { - if _, has := ledger.WantListContains(k); has { - continue - } - e.peerLedger.CancelWant(p, k) - } - ledger.lk.RUnlock() - } else { - for _, k := range wl { - e.peerLedger.CancelWant(p, k) - } - } - } - e.lock.Unlock() - } - - if work { - e.signalNewWork() - } -} - -// TODO add contents of m.WantList() to my local wantlist? NB: could introduce -// race conditions where I send a message, but MessageSent gets handled after -// MessageReceived. The information in the local wantlist could become -// inconsistent. Would need to ensure that Sends and acknowledgement of the -// send happen atomically - -// MessageSent is called when a message has successfully been sent out, to record -// changes. -func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { - l := e.findOrCreate(p) - l.lk.Lock() - defer l.lk.Unlock() - - // Remove sent blocks from the want list for the peer - for _, block := range m.Blocks() { - e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData())) - l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) - } - - // Remove sent block presences from the want list for the peer - for _, bp := range m.BlockPresences() { - // Don't record sent data. We reserve that for data blocks. - if bp.Type == pb.Message_Have { - l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) - } - } -} - -// PeerConnected is called when a new peer connects, meaning we should start -// sending blocks. -func (e *Engine) PeerConnected(p peer.ID) { - e.lock.Lock() - defer e.lock.Unlock() - - _, ok := e.ledgerMap[p] - if !ok { - e.ledgerMap[p] = newLedger(p) - } - - e.scoreLedger.PeerConnected(p) -} - -// PeerDisconnected is called when a peer disconnects. 
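A minimal sketch of the receive-side flow these methods expect from the caller, assuming blocks blks just arrived from peer from and bstore is the blockstore handed to NewEngine (this mirrors TestSendReceivedBlocksToPeersThatWantThem further down):

    e.ReceivedBlocks(from, blks) // credit the sender in the score ledger
    if err := bstore.PutMany(ctx, blks); err != nil {
        return err
    }
    e.NotifyNewBlocks(blks) // queue the blocks for any peers whose wantlists mention them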
-func (e *Engine) PeerDisconnected(p peer.ID) { - e.lock.Lock() - defer e.lock.Unlock() - - ledger, ok := e.ledgerMap[p] - if ok { - ledger.lk.RLock() - entries := ledger.Entries() - ledger.lk.RUnlock() - - for _, entry := range entries { - e.peerLedger.CancelWant(p, entry.Cid) - } - } - delete(e.ledgerMap, p) - - e.scoreLedger.PeerDisconnected(p) -} - -// If the want is a want-have, and it's below a certain size, send the full -// block (instead of sending a HAVE) -func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { - isWantBlock := wantType == pb.Message_Wantlist_Block - return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock -} - -func (e *Engine) numBytesSentTo(p peer.ID) uint64 { - return e.LedgerForPeer(p).Sent -} - -func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { - return e.LedgerForPeer(p).Recv -} - -// ledger lazily instantiates a ledger -func (e *Engine) findOrCreate(p peer.ID) *ledger { - // Take a read lock (as it's less expensive) to check if we have a ledger - // for the peer - e.lock.RLock() - l, ok := e.ledgerMap[p] - e.lock.RUnlock() - if ok { - return l - } - - // There's no ledger, so take a write lock, then check again and create the - // ledger if necessary - e.lock.Lock() - defer e.lock.Unlock() - l, ok = e.ledgerMap[p] - if !ok { - l = newLedger(p) - e.ledgerMap[p] = l - } - return l -} - -func (e *Engine) signalNewWork() { - // Signal task generation to restart (if stopped!) - select { - case e.workSignal <- struct{}{}: - default: - } -} diff --git a/server/internal/decision/engine_test.go b/server/internal/decision/engine_test.go deleted file mode 100644 index 8872eeb9..00000000 --- a/server/internal/decision/engine_test.go +++ /dev/null @@ -1,1610 +0,0 @@ -package decision - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/testutil" - message "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - process "github.com/jbenet/goprocess" - peer "github.com/libp2p/go-libp2p/core/peer" - libp2ptest "github.com/libp2p/go-libp2p/core/test" -) - -type peerTag struct { - done chan struct{} - peers map[peer.ID]int -} - -type fakePeerTagger struct { - lk sync.Mutex - tags map[string]*peerTag -} - -func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { - fpt.lk.Lock() - defer fpt.lk.Unlock() - if fpt.tags == nil { - fpt.tags = make(map[string]*peerTag, 1) - } - pt, ok := fpt.tags[tag] - if !ok { - pt = &peerTag{peers: make(map[peer.ID]int, 1), done: make(chan struct{})} - fpt.tags[tag] = pt - } - pt.peers[p] = n -} - -func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { - fpt.lk.Lock() - defer fpt.lk.Unlock() - pt := fpt.tags[tag] - if pt == nil { - return - } - delete(pt.peers, p) - if len(pt.peers) == 0 { - close(pt.done) - delete(fpt.tags, tag) - } -} - -func (fpt *fakePeerTagger) count(tag string) int { - fpt.lk.Lock() - defer fpt.lk.Unlock() - if pt, ok := fpt.tags[tag]; ok { - return len(pt.peers) - } - return 0 -} - -func (fpt *fakePeerTagger) wait(tag string) { - fpt.lk.Lock() - pt := fpt.tags[tag] - if pt == nil { - fpt.lk.Unlock() - return - } - doneCh := pt.done - fpt.lk.Unlock() - <-doneCh -} - -type engineSet struct { - PeerTagger 
*fakePeerTagger - Peer peer.ID - Engine *Engine - Blockstore blockstore.Blockstore -} - -func newTestEngine(ctx context.Context, idStr string, opts ...Option) engineSet { - return newTestEngineWithSampling(ctx, idStr, shortTerm, nil, clock.New(), opts...) -} - -func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock, opts ...Option) engineSet { - fpt := &fakePeerTagger{} - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, append(opts[:len(opts):len(opts)], WithScoreLedger(NewTestScoreLedger(peerSampleInterval, sampleCh, clock)), WithBlockstoreWorkerCount(4))...) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - return engineSet{ - Peer: peer.ID(idStr), - //Strategy: New(true), - PeerTagger: fpt, - Blockstore: bs, - Engine: e, - } -} - -func TestConsistentAccounting(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - sender := newTestEngine(ctx, "Ernie") - receiver := newTestEngine(ctx, "Bert") - - // Send messages from Ernie to Bert - for i := 0; i < 1000; i++ { - - m := message.New(false) - content := []string{"this", "is", "message", "i"} - m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) - - sender.Engine.MessageSent(receiver.Peer, m) - receiver.Engine.MessageReceived(ctx, sender.Peer, m) - receiver.Engine.ReceivedBlocks(sender.Peer, m.Blocks()) - } - - // Ensure sender records the change - if sender.Engine.numBytesSentTo(receiver.Peer) == 0 { - t.Fatal("Sent bytes were not recorded") - } - - // Ensure sender and receiver have the same values - if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) { - t.Fatal("Inconsistent book-keeping. Strategies don't agree") - } - - // Ensure sender didn't record receiving anything.
And that the receiver - // didn't record sending anything - if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 { - t.Fatal("Bert didn't send bytes to Ernie") - } -} - -func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - sanfrancisco := newTestEngine(ctx, "sf") - seattle := newTestEngine(ctx, "sea") - - m := message.New(true) - - sanfrancisco.Engine.MessageSent(seattle.Peer, m) - seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m) - - if seattle.Peer == sanfrancisco.Peer { - t.Fatal("Sanity Check: Peers have same Key!") - } - - if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) { - t.Fatal("Peer wasn't added as a Partner") - } - - if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) { - t.Fatal("Peer wasn't added as a Partner") - } - - seattle.Engine.PeerDisconnected(sanfrancisco.Peer) - if peerIsPartner(sanfrancisco.Peer, seattle.Engine) { - t.Fatal("expected peer to be removed") - } -} - -func peerIsPartner(p peer.ID, e *Engine) bool { - for _, partner := range e.Peers() { - if partner == p { - return true - } - } - return false -} - -func newEngineForTesting( - ctx context.Context, - bs blockstore.Blockstore, - peerTagger PeerTagger, - self peer.ID, - maxReplaceSize int, - opts ...Option, -) *Engine { - return newEngine( - ctx, - bs, - peerTagger, - self, - maxReplaceSize, - opts..., - ) -} - -func TestOutboxClosedWhenEngineClosed(t *testing.T) { - t.SkipNow() // TODO implement *Engine.Close - ctx := context.Background() - e := newEngineForTesting(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - var wg sync.WaitGroup - wg.Add(1) - go func() { - for nextEnvelope := range e.Outbox() { - <-nextEnvelope - } - wg.Done() - }() - // e.Close() - wg.Wait() - if _, ok := <-e.Outbox(); ok { - t.Fatal("channel should be closed") - } -} - -func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { - alphabet := "abcdefghijklmnopqrstuvwxyz" - vowels := "aeiou" - - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - for _, letter := range strings.Split(alphabet, "") { - block := blocks.NewBlock([]byte(letter)) - if err := bs.Put(context.Background(), block); err != nil { - t.Fatal(err) - } - } - - partner := libp2ptest.RandPeerIDFatal(t) - // partnerWantBlocks(e, vowels, partner) - - type testCaseEntry struct { - wantBlks string - wantHaves string - sendDontHave bool - } - - type testCaseExp struct { - blks string - haves string - dontHaves string - } - - type testCase struct { - only bool - wls []testCaseEntry - exp []testCaseExp - } - - testCases := []testCase{ - // Just send want-blocks - { - wls: []testCaseEntry{ - { - wantBlks: vowels, - sendDontHave: false, - }, - }, - exp: []testCaseExp{ - { - blks: vowels, - }, - }, - }, - - // Send want-blocks and want-haves - { - wls: []testCaseEntry{ - { - wantBlks: vowels, - wantHaves: "fgh", - sendDontHave: false, - }, - }, - exp: []testCaseExp{ - { - blks: vowels, - haves: "fgh", - }, - }, - }, - - // Send want-blocks and want-haves, with some want-haves that are not - // present, but without requesting DONT_HAVES - { - wls: []testCaseEntry{ - { - wantBlks: vowels, - wantHaves: "fgh123", - sendDontHave: false, - }, - }, - exp: []testCaseExp{ - { - 
blks: vowels, - haves: "fgh", - }, - }, - }, - - // Send want-blocks and want-haves, with some want-haves that are not - // present, and request DONT_HAVES - { - wls: []testCaseEntry{ - { - wantBlks: vowels, - wantHaves: "fgh123", - sendDontHave: true, - }, - }, - exp: []testCaseExp{ - { - blks: vowels, - haves: "fgh", - dontHaves: "123", - }, - }, - }, - - // Send want-blocks and want-haves, with some want-blocks and want-haves that are not - // present, but without requesting DONT_HAVES - { - wls: []testCaseEntry{ - { - wantBlks: "aeiou123", - wantHaves: "fgh456", - sendDontHave: false, - }, - }, - exp: []testCaseExp{ - { - blks: "aeiou", - haves: "fgh", - dontHaves: "", - }, - }, - }, - - // Send want-blocks and want-haves, with some want-blocks and want-haves that are not - // present, and request DONT_HAVES - { - wls: []testCaseEntry{ - { - wantBlks: "aeiou123", - wantHaves: "fgh456", - sendDontHave: true, - }, - }, - exp: []testCaseExp{ - { - blks: "aeiou", - haves: "fgh", - dontHaves: "123456", - }, - }, - }, - - // Send repeated want-blocks - { - wls: []testCaseEntry{ - { - wantBlks: "ae", - sendDontHave: false, - }, - { - wantBlks: "io", - sendDontHave: false, - }, - { - wantBlks: "u", - sendDontHave: false, - }, - }, - exp: []testCaseExp{ - { - blks: "aeiou", - }, - }, - }, - - // Send repeated want-blocks and want-haves - { - wls: []testCaseEntry{ - { - wantBlks: "ae", - wantHaves: "jk", - sendDontHave: false, - }, - { - wantBlks: "io", - wantHaves: "lm", - sendDontHave: false, - }, - { - wantBlks: "u", - sendDontHave: false, - }, - }, - exp: []testCaseExp{ - { - blks: "aeiou", - haves: "jklm", - }, - }, - }, - - // Send repeated want-blocks and want-haves, with some want-blocks and want-haves that are not - // present, and request DONT_HAVES - { - wls: []testCaseEntry{ - { - wantBlks: "ae12", - wantHaves: "jk5", - sendDontHave: true, - }, - { - wantBlks: "io34", - wantHaves: "lm", - sendDontHave: true, - }, - { - wantBlks: "u", - wantHaves: "6", - sendDontHave: true, - }, - }, - exp: []testCaseExp{ - { - blks: "aeiou", - haves: "jklm", - dontHaves: "123456", - }, - }, - }, - - // Send want-block then want-have for same CID - { - wls: []testCaseEntry{ - { - wantBlks: "a", - sendDontHave: true, - }, - { - wantHaves: "a", - sendDontHave: true, - }, - }, - // want-have should be ignored because there was already a - // want-block for the same CID in the queue - exp: []testCaseExp{ - { - blks: "a", - }, - }, - }, - - // Send want-have then want-block for same CID - { - wls: []testCaseEntry{ - { - wantHaves: "b", - sendDontHave: true, - }, - { - wantBlks: "b", - sendDontHave: true, - }, - }, - // want-block should overwrite existing want-have - exp: []testCaseExp{ - { - blks: "b", - }, - }, - }, - - // Send want-block then want-block for same CID - { - wls: []testCaseEntry{ - { - wantBlks: "a", - sendDontHave: true, - }, - { - wantBlks: "a", - sendDontHave: true, - }, - }, - // second want-block should be ignored - exp: []testCaseExp{ - { - blks: "a", - }, - }, - }, - - // Send want-have then want-have for same CID - { - wls: []testCaseEntry{ - { - wantHaves: "a", - sendDontHave: true, - }, - { - wantHaves: "a", - sendDontHave: true, - }, - }, - // second want-have should be ignored - exp: []testCaseExp{ - { - haves: "a", - }, - }, - }, - } - - var onlyTestCases []testCase - for _, testCase := range testCases { - if testCase.only { - onlyTestCases = append(onlyTestCases, testCase) - } - } - if len(onlyTestCases) > 0 { - testCases = onlyTestCases - } - - ctx := context.Background() - 
e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - for i, testCase := range testCases { - t.Logf("Test case %d:", i) - for _, wl := range testCase.wls { - t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", - wl.wantBlks, wl.wantHaves, wl.sendDontHave) - wantBlks := strings.Split(wl.wantBlks, "") - wantHaves := strings.Split(wl.wantHaves, "") - partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) - } - - for _, exp := range testCase.exp { - expBlks := strings.Split(exp.blks, "") - expHaves := strings.Split(exp.haves, "") - expDontHaves := strings.Split(exp.dontHaves, "") - - next := <-e.Outbox() - env := <-next - err := checkOutput(t, e, env, expBlks, expHaves, expDontHaves) - if err != nil { - t.Fatal(err) - } - env.Sent() - } - } -} - -func TestPartnerWantHaveWantBlockActive(t *testing.T) { - alphabet := "abcdefghijklmnopqrstuvwxyz" - - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - for _, letter := range strings.Split(alphabet, "") { - block := blocks.NewBlock([]byte(letter)) - if err := bs.Put(context.Background(), block); err != nil { - t.Fatal(err) - } - } - - partner := libp2ptest.RandPeerIDFatal(t) - - type testCaseEntry struct { - wantBlks string - wantHaves string - sendDontHave bool - } - - type testCaseExp struct { - blks string - haves string - dontHaves string - } - - type testCase struct { - only bool - wls []testCaseEntry - exp []testCaseExp - } - - testCases := []testCase{ - // Send want-block then want-have for same CID - { - wls: []testCaseEntry{ - { - wantBlks: "a", - sendDontHave: true, - }, - { - wantHaves: "a", - sendDontHave: true, - }, - }, - // want-have should be ignored because there was already a - // want-block for the same CID in the queue - exp: []testCaseExp{ - { - blks: "a", - }, - }, - }, - - // Send want-have then want-block for same CID - { - wls: []testCaseEntry{ - { - wantHaves: "b", - sendDontHave: true, - }, - { - wantBlks: "b", - sendDontHave: true, - }, - }, - // want-have is active when want-block is added, so want-have - // should get sent, then want-block - exp: []testCaseExp{ - { - haves: "b", - }, - { - blks: "b", - }, - }, - }, - - // Send want-block then want-block for same CID - { - wls: []testCaseEntry{ - { - wantBlks: "a", - sendDontHave: true, - }, - { - wantBlks: "a", - sendDontHave: true, - }, - }, - // second want-block should be ignored - exp: []testCaseExp{ - { - blks: "a", - }, - }, - }, - - // Send want-have then want-have for same CID - { - wls: []testCaseEntry{ - { - wantHaves: "a", - sendDontHave: true, - }, - { - wantHaves: "a", - sendDontHave: true, - }, - }, - // second want-have should be ignored - exp: []testCaseExp{ - { - haves: "a", - }, - }, - }, - } - - var onlyTestCases []testCase - for _, testCase := range testCases { - if testCase.only { - onlyTestCases = append(onlyTestCases, testCase) - } - } - if len(onlyTestCases) > 0 { - testCases = onlyTestCases - } - - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - var next envChan - for i, testCase := range testCases { - envs := make([]*Envelope, 0) - - t.Logf("Test case %d:", i) - for _, wl := range 
testCase.wls { - t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", - wl.wantBlks, wl.wantHaves, wl.sendDontHave) - wantBlks := strings.Split(wl.wantBlks, "") - wantHaves := strings.Split(wl.wantHaves, "") - partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) - - var env *Envelope - next, env = getNextEnvelope(e, next, 5*time.Millisecond) - if env != nil { - envs = append(envs, env) - } - } - - if len(envs) != len(testCase.exp) { - t.Fatalf("Expected %d envelopes but received %d", len(testCase.exp), len(envs)) - } - - for i, exp := range testCase.exp { - expBlks := strings.Split(exp.blks, "") - expHaves := strings.Split(exp.haves, "") - expDontHaves := strings.Split(exp.dontHaves, "") - - err := checkOutput(t, e, envs[i], expBlks, expHaves, expDontHaves) - if err != nil { - t.Fatal(err) - } - envs[i].Sent() - } - } -} - -func checkOutput(t *testing.T, e *Engine, envelope *Envelope, expBlks []string, expHaves []string, expDontHaves []string) error { - blks := envelope.Message.Blocks() - presences := envelope.Message.BlockPresences() - - // Verify payload message length - if len(blks) != len(expBlks) { - blkDiff := formatBlocksDiff(blks, expBlks) - msg := fmt.Sprintf("Received %d blocks. Expected %d blocks:\n%s", len(blks), len(expBlks), blkDiff) - return errors.New(msg) - } - - // Verify block presences message length - expPresencesCount := len(expHaves) + len(expDontHaves) - if len(presences) != expPresencesCount { - presenceDiff := formatPresencesDiff(presences, expHaves, expDontHaves) - return fmt.Errorf("Received %d BlockPresences. Expected %d BlockPresences:\n%s", - len(presences), expPresencesCount, presenceDiff) - } - - // Verify payload message contents - for _, k := range expBlks { - found := false - expected := blocks.NewBlock([]byte(k)) - for _, block := range blks { - if block.Cid().Equals(expected.Cid()) { - found = true - break - } - } - if !found { - return errors.New(formatBlocksDiff(blks, expBlks)) - } - } - - // Verify HAVEs - if err := checkPresence(presences, expHaves, pb.Message_Have); err != nil { - return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) - } - - // Verify DONT_HAVEs - if err := checkPresence(presences, expDontHaves, pb.Message_DontHave); err != nil { - return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) - } - - return nil -} - -func checkPresence(presences []message.BlockPresence, expPresence []string, presenceType pb.Message_BlockPresenceType) error { - for _, k := range expPresence { - found := false - expected := blocks.NewBlock([]byte(k)) - for _, p := range presences { - if p.Cid.Equals(expected.Cid()) { - found = true - if p.Type != presenceType { - return errors.New("type mismatch") - } - break - } - } - if !found { - return errors.New("not found") - } - } - return nil -} - -func formatBlocksDiff(blks []blocks.Block, expBlks []string) string { - var out bytes.Buffer - out.WriteString(fmt.Sprintf("Blocks (%d):\n", len(blks))) - for _, b := range blks { - out.WriteString(fmt.Sprintf(" %s: %s\n", b.Cid(), b.RawData())) - } - out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expBlks))) - for _, k := range expBlks { - expected := blocks.NewBlock([]byte(k)) - out.WriteString(fmt.Sprintf(" %s: %s\n", expected.Cid(), k)) - } - return out.String() -} - -func formatPresencesDiff(presences []message.BlockPresence, expHaves []string, expDontHaves []string) string { - var out bytes.Buffer - out.WriteString(fmt.Sprintf("BlockPresences (%d):\n", len(presences))) - for _, p := range 
presences { - t := "HAVE" - if p.Type == pb.Message_DontHave { - t = "DONT_HAVE" - } - out.WriteString(fmt.Sprintf(" %s - %s\n", p.Cid, t)) - } - out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expHaves)+len(expDontHaves))) - for _, k := range expHaves { - expected := blocks.NewBlock([]byte(k)) - out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", expected.Cid(), k)) - } - for _, k := range expDontHaves { - expected := blocks.NewBlock([]byte(k)) - out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", expected.Cid(), k)) - } - return out.String() -} - -func TestPartnerWantsThenCancels(t *testing.T) { - numRounds := 10 - if testing.Short() { - numRounds = 1 - } - alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") - vowels := strings.Split("aeiou", "") - - type testCase [][]string - testcases := []testCase{ - { - alphabet, vowels, - }, - { - alphabet, stringsComplement(alphabet, vowels), - alphabet[1:25], stringsComplement(alphabet[1:25], vowels), alphabet[2:25], stringsComplement(alphabet[2:25], vowels), - alphabet[3:25], stringsComplement(alphabet[3:25], vowels), alphabet[4:25], stringsComplement(alphabet[4:25], vowels), - alphabet[5:25], stringsComplement(alphabet[5:25], vowels), alphabet[6:25], stringsComplement(alphabet[6:25], vowels), - alphabet[7:25], stringsComplement(alphabet[7:25], vowels), alphabet[8:25], stringsComplement(alphabet[8:25], vowels), - alphabet[9:25], stringsComplement(alphabet[9:25], vowels), alphabet[10:25], stringsComplement(alphabet[10:25], vowels), - alphabet[11:25], stringsComplement(alphabet[11:25], vowels), alphabet[12:25], stringsComplement(alphabet[12:25], vowels), - alphabet[13:25], stringsComplement(alphabet[13:25], vowels), alphabet[14:25], stringsComplement(alphabet[14:25], vowels), - alphabet[15:25], stringsComplement(alphabet[15:25], vowels), alphabet[16:25], stringsComplement(alphabet[16:25], vowels), - alphabet[17:25], stringsComplement(alphabet[17:25], vowels), alphabet[18:25], stringsComplement(alphabet[18:25], vowels), - alphabet[19:25], stringsComplement(alphabet[19:25], vowels), alphabet[20:25], stringsComplement(alphabet[20:25], vowels), - alphabet[21:25], stringsComplement(alphabet[21:25], vowels), alphabet[22:25], stringsComplement(alphabet[22:25], vowels), - alphabet[23:25], stringsComplement(alphabet[23:25], vowels), alphabet[24:25], stringsComplement(alphabet[24:25], vowels), - alphabet[25:25], stringsComplement(alphabet[25:25], vowels), - }, - } - - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - for _, letter := range alphabet { - block := blocks.NewBlock([]byte(letter)) - if err := bs.Put(context.Background(), block); err != nil { - t.Fatal(err) - } - } - - ctx := context.Background() - for i := 0; i < numRounds; i++ { - expected := make([][]string, 0, len(testcases)) - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - for _, testcase := range testcases { - set := testcase[0] - cancels := testcase[1] - keeps := stringsComplement(set, cancels) - expected = append(expected, keeps) - - partner := libp2ptest.RandPeerIDFatal(t) - - partnerWantBlocks(e, set, partner) - partnerCancels(e, cancels, partner) - } - if err := checkHandledInOrder(t, e, expected); err != nil { - t.Logf("run #%d of %d", i, numRounds) - t.Fatal(err) - } - } -} - -func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { - bs := 
blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - partner := libp2ptest.RandPeerIDFatal(t) - otherPeer := libp2ptest.RandPeerIDFatal(t) - - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - blks := testutil.GenerateBlocksOfSize(4, 8*1024) - msg := message.New(false) - msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) - msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) - msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) - msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, false) - e.MessageReceived(context.Background(), partner, msg) - - // Nothing in blockstore, so shouldn't get any envelope - var next envChan - next, env := getNextEnvelope(e, next, 5*time.Millisecond) - if env != nil { - t.Fatal("expected no envelope yet") - } - - e.ReceivedBlocks(otherPeer, []blocks.Block{blks[0], blks[2]}) - if err := bs.PutMany(context.Background(), []blocks.Block{blks[0], blks[2]}); err != nil { - t.Fatal(err) - } - e.NotifyNewBlocks([]blocks.Block{blks[0], blks[2]}) - _, env = getNextEnvelope(e, next, 5*time.Millisecond) - if env == nil { - t.Fatal("expected envelope") - } - if env.Peer != partner { - t.Fatal("expected message to peer") - } - sentBlk := env.Message.Blocks() - if len(sentBlk) != 1 || !sentBlk[0].Cid().Equals(blks[2].Cid()) { - t.Fatal("expected 1 block") - } - sentHave := env.Message.BlockPresences() - if len(sentHave) != 1 || !sentHave[0].Cid.Equals(blks[0].Cid()) || sentHave[0].Type != pb.Message_Have { - t.Fatal("expected 1 HAVE") - } -} - -func TestSendDontHave(t *testing.T) { - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - partner := libp2ptest.RandPeerIDFatal(t) - otherPeer := libp2ptest.RandPeerIDFatal(t) - - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - blks := testutil.GenerateBlocksOfSize(4, 8*1024) - msg := message.New(false) - msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) - msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) - msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) - msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, true) - e.MessageReceived(context.Background(), partner, msg) - - // Nothing in blockstore, should get DONT_HAVE for entries that wanted it - var next envChan - next, env := getNextEnvelope(e, next, 10*time.Millisecond) - if env == nil { - t.Fatal("expected envelope") - } - if env.Peer != partner { - t.Fatal("expected message to peer") - } - if len(env.Message.Blocks()) > 0 { - t.Fatal("expected no blocks") - } - sentDontHaves := env.Message.BlockPresences() - if len(sentDontHaves) != 2 { - t.Fatal("expected 2 DONT_HAVEs") - } - if !sentDontHaves[0].Cid.Equals(blks[1].Cid()) && - !sentDontHaves[1].Cid.Equals(blks[1].Cid()) { - t.Fatal("expected DONT_HAVE for want-have") - } - if !sentDontHaves[0].Cid.Equals(blks[3].Cid()) && - !sentDontHaves[1].Cid.Equals(blks[3].Cid()) { - t.Fatal("expected DONT_HAVE for want-block") - } - - // Receive all the blocks - e.ReceivedBlocks(otherPeer, []blocks.Block{blks[0], blks[2]}) - if err := bs.PutMany(context.Background(), blks); 
err != nil { - t.Fatal(err) - } - e.NotifyNewBlocks(blks) - - // Envelope should contain 2 HAVEs / 2 blocks - _, env = getNextEnvelope(e, next, 10*time.Millisecond) - if env == nil { - t.Fatal("expected envelope") - } - if env.Peer != partner { - t.Fatal("expected message to peer") - } - if len(env.Message.Blocks()) != 2 { - t.Fatal("expected 2 blocks") - } - sentHave := env.Message.BlockPresences() - if len(sentHave) != 2 || sentHave[0].Type != pb.Message_Have || sentHave[1].Type != pb.Message_Have { - t.Fatal("expected 2 HAVEs") - } -} - -func TestWantlistForPeer(t *testing.T) { - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - partner := libp2ptest.RandPeerIDFatal(t) - otherPeer := libp2ptest.RandPeerIDFatal(t) - - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - blks := testutil.GenerateBlocksOfSize(4, 8*1024) - msg := message.New(false) - msg.AddEntry(blks[0].Cid(), 2, pb.Message_Wantlist_Have, false) - msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) - e.MessageReceived(context.Background(), partner, msg) - - msg2 := message.New(false) - msg2.AddEntry(blks[2].Cid(), 1, pb.Message_Wantlist_Block, false) - msg2.AddEntry(blks[3].Cid(), 4, pb.Message_Wantlist_Block, false) - e.MessageReceived(context.Background(), partner, msg2) - - entries := e.WantlistForPeer(otherPeer) - if len(entries) != 0 { - t.Fatal("expected wantlist to contain no wants for other peer") - } - - entries = e.WantlistForPeer(partner) - if len(entries) != 4 { - t.Fatal("expected wantlist to contain all wants from parter") - } - if entries[0].Priority != 4 || entries[1].Priority != 3 || entries[2].Priority != 2 || entries[3].Priority != 1 { - t.Fatal("expected wantlist to be sorted") - } - -} - -func TestTaskComparator(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - keys := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"} - cids := make(map[cid.Cid]int) - blks := make([]blocks.Block, 0, len(keys)) - for i, letter := range keys { - block := blocks.NewBlock([]byte(letter)) - blks = append(blks, block) - cids[block.Cid()] = i - } - - fpt := &fakePeerTagger{} - sl := NewTestScoreLedger(shortTerm, nil, clock.New()) - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - if err := bs.PutMany(ctx, blks); err != nil { - t.Fatal(err) - } - - // use a single task worker so that the order of outgoing messages is deterministic - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithTaskWorkerCount(1), - // if this Option is omitted, the test fails - WithTaskComparator(func(ta, tb *TaskInfo) bool { - // prioritize based on lexicographic ordering of block content - return cids[ta.Cid] < cids[tb.Cid] - }), - ) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - // rely on randomness of Go map's iteration order to add Want entries in random order - peerIDs := make([]peer.ID, len(keys)) - for _, i := range cids { - peerID := libp2ptest.RandPeerIDFatal(t) - peerIDs[i] = peerID - partnerWantBlocks(e, keys[i:i+1], peerID) - } - - // check that outgoing messages are sent in the correct order - for i, peerID := range peerIDs { - next := <-e.Outbox() - envelope := <-next - if peerID != envelope.Peer { - 
t.Errorf("expected message for peer ID %#v but instead got message for peer ID %#v", peerID, envelope.Peer) - } - responseBlocks := envelope.Message.Blocks() - if len(responseBlocks) != 1 { - t.Errorf("expected 1 block in response but instead got %v", len(blks)) - } else if responseBlocks[0].Cid() != blks[i].Cid() { - t.Errorf("expected block with CID %#v but instead got block with CID %#v", blks[i].Cid(), responseBlocks[0].Cid()) - } - } -} - -func TestPeerBlockFilter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - // Generate a few keys - keys := []string{"a", "b", "c", "d"} - blks := make([]blocks.Block, 0, len(keys)) - for _, letter := range keys { - block := blocks.NewBlock([]byte(letter)) - blks = append(blks, block) - } - - // Generate a few partner peers - peerIDs := make([]peer.ID, 3) - peerIDs[0] = libp2ptest.RandPeerIDFatal(t) - peerIDs[1] = libp2ptest.RandPeerIDFatal(t) - peerIDs[2] = libp2ptest.RandPeerIDFatal(t) - - // Setup the main peer - fpt := &fakePeerTagger{} - sl := NewTestScoreLedger(shortTerm, nil, clock.New()) - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - if err := bs.PutMany(ctx, blks); err != nil { - t.Fatal(err) - } - - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), - WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { - // peer 0 has access to everything - if p == peerIDs[0] { - return true - } - // peer 1 can only access key c and d - if p == peerIDs[1] { - return blks[2].Cid().Equals(c) || blks[3].Cid().Equals(c) - } - // peer 2 and other can only access key d - return blks[3].Cid().Equals(c) - }), - ) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - // Setup the test - type testCaseEntry struct { - peerIndex int - wantBlks string - wantHaves string - } - - type testCaseExp struct { - blks string - haves string - dontHaves string - } - - type testCase struct { - only bool - wl testCaseEntry - exp testCaseExp - } - - testCases := []testCase{ - // Peer 0 has access to everything: want-block `a` succeeds. - { - wl: testCaseEntry{ - peerIndex: 0, - wantBlks: "a", - }, - exp: testCaseExp{ - blks: "a", - }, - }, - // Peer 0 has access to everything: want-have `b` succeeds. - { - wl: testCaseEntry{ - peerIndex: 0, - wantHaves: "b1", - }, - exp: testCaseExp{ - haves: "b", - dontHaves: "1", - }, - }, - // Peer 1 has access to [c, d]: want-have `a` result in dont-have. - { - wl: testCaseEntry{ - peerIndex: 1, - wantHaves: "ac", - }, - exp: testCaseExp{ - haves: "c", - dontHaves: "a", - }, - }, - // Peer 1 has access to [c, d]: want-block `b` result in dont-have. - { - wl: testCaseEntry{ - peerIndex: 1, - wantBlks: "bd", - }, - exp: testCaseExp{ - blks: "d", - dontHaves: "b", - }, - }, - // Peer 2 has access to [d]: want-have `a` and want-block `b` result in dont-have. 
- { - wl: testCaseEntry{ - peerIndex: 2, - wantHaves: "a", - wantBlks: "bcd1", - }, - exp: testCaseExp{ - haves: "", - blks: "d", - dontHaves: "abc1", - }, - }, - } - - var onlyTestCases []testCase - for _, testCase := range testCases { - if testCase.only { - onlyTestCases = append(onlyTestCases, testCase) - } - } - if len(onlyTestCases) > 0 { - testCases = onlyTestCases - } - - for i, testCase := range testCases { - // Create wants requests - wl := testCase.wl - - t.Logf("test case %v: Peer%v / want-blocks '%s' / want-haves '%s'", - i, wl.peerIndex, wl.wantBlks, wl.wantHaves) - - wantBlks := strings.Split(wl.wantBlks, "") - wantHaves := strings.Split(wl.wantHaves, "") - - partnerWantBlocksHaves(e, wantBlks, wantHaves, true, peerIDs[wl.peerIndex]) - - // Check result - exp := testCase.exp - - next := <-e.Outbox() - envelope := <-next - - expBlks := strings.Split(exp.blks, "") - expHaves := strings.Split(exp.haves, "") - expDontHaves := strings.Split(exp.dontHaves, "") - - err := checkOutput(t, e, envelope, expBlks, expHaves, expDontHaves) - if err != nil { - t.Fatal(err) - } - } -} - -func TestPeerBlockFilterMutability(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - // Generate a few keys - keys := []string{"a", "b", "c", "d"} - blks := make([]blocks.Block, 0, len(keys)) - for _, letter := range keys { - block := blocks.NewBlock([]byte(letter)) - blks = append(blks, block) - } - - partnerID := libp2ptest.RandPeerIDFatal(t) - - // Setup the main peer - fpt := &fakePeerTagger{} - sl := NewTestScoreLedger(shortTerm, nil, clock.New()) - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - if err := bs.PutMany(ctx, blks); err != nil { - t.Fatal(err) - } - - filterAllowList := make(map[cid.Cid]bool) - - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), - WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { - return filterAllowList[c] - }), - ) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - - // Setup the test - type testCaseEntry struct { - allowList string - wantBlks string - wantHaves string - } - - type testCaseExp struct { - blks string - haves string - dontHaves string - } - - type testCase struct { - only bool - wls []testCaseEntry - exps []testCaseExp - } - - testCases := []testCase{ - { - wls: []testCaseEntry{ - { - // Peer has no accesses & request a want-block - allowList: "", - wantBlks: "a", - }, - { - // Then Peer is allowed access to a - allowList: "a", - wantBlks: "a", - }, - }, - exps: []testCaseExp{ - { - dontHaves: "a", - }, - { - blks: "a", - }, - }, - }, - { - wls: []testCaseEntry{ - { - // Peer has access to bc - allowList: "bc", - wantHaves: "bc", - }, - { - // Then Peer loses access to b - allowList: "c", - wantBlks: "bc", // Note: We request a block here to force a response from the node - }, - }, - exps: []testCaseExp{ - { - haves: "bc", - }, - { - blks: "c", - dontHaves: "b", - }, - }, - }, - { - wls: []testCaseEntry{ - { - // Peer has no accesses & request a want-have - allowList: "", - wantHaves: "d", - }, - { - // Then Peer gains access to d - allowList: "d", - wantHaves: "d", - }, - }, - exps: []testCaseExp{ - { - dontHaves: "d", - }, - { - haves: "d", - }, - }, - }, - } - - var onlyTestCases []testCase - for _, testCase := range testCases { - if testCase.only { - onlyTestCases = append(onlyTestCases, testCase) - } - } - if len(onlyTestCases) > 0 { - testCases = onlyTestCases - } - - for i, 
testCase := range testCases { - for j := range testCase.wls { - wl := testCase.wls[j] - exp := testCase.exps[j] - - // Create wants requests - t.Logf("test case %v, %v: allow-list '%s' / want-blocks '%s' / want-haves '%s'", - i, j, wl.allowList, wl.wantBlks, wl.wantHaves) - - allowList := strings.Split(wl.allowList, "") - wantBlks := strings.Split(wl.wantBlks, "") - wantHaves := strings.Split(wl.wantHaves, "") - - // Update the allow list - filterAllowList = make(map[cid.Cid]bool) - for _, letter := range allowList { - block := blocks.NewBlock([]byte(letter)) - filterAllowList[block.Cid()] = true - } - - // Send the request - partnerWantBlocksHaves(e, wantBlks, wantHaves, true, partnerID) - - // Check result - next := <-e.Outbox() - envelope := <-next - - expBlks := strings.Split(exp.blks, "") - expHaves := strings.Split(exp.haves, "") - expDontHaves := strings.Split(exp.dontHaves, "") - - err := checkOutput(t, e, envelope, expBlks, expHaves, expDontHaves) - if err != nil { - t.Fatal(err) - } - } - } -} - -func TestTaggingPeers(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - sanfrancisco := newTestEngine(ctx, "sf") - seattle := newTestEngine(ctx, "sea") - - keys := []string{"a", "b", "c", "d", "e"} - for _, letter := range keys { - block := blocks.NewBlock([]byte(letter)) - if err := sanfrancisco.Blockstore.Put(context.Background(), block); err != nil { - t.Fatal(err) - } - } - partnerWantBlocks(sanfrancisco.Engine, keys, seattle.Peer) - next := <-sanfrancisco.Engine.Outbox() - envelope := <-next - - if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 1 { - t.Fatal("Incorrect number of peers tagged") - } - envelope.Sent() - <-sanfrancisco.Engine.Outbox() - sanfrancisco.PeerTagger.wait(sanfrancisco.Engine.tagQueued) - if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 0 { - t.Fatal("Peers should be untagged but weren't") - } -} - -func TestTaggingUseful(t *testing.T) { - peerSampleIntervalHalf := 10 * time.Millisecond - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - sampleCh := make(chan struct{}) - mockClock := clock.NewMock() - me := newTestEngineWithSampling(ctx, "engine", peerSampleIntervalHalf*2, sampleCh, mockClock) - mockClock.Add(1 * time.Millisecond) - friend := peer.ID("friend") - - block := blocks.NewBlock([]byte("foobar")) - msg := message.New(false) - msg.AddBlock(block) - - for i := 0; i < 3; i++ { - if untagged := me.PeerTagger.count(me.Engine.tagUseful); untagged != 0 { - t.Fatalf("%d peers should be untagged but weren't", untagged) - } - mockClock.Add(peerSampleIntervalHalf) - me.Engine.MessageSent(friend, msg) - - mockClock.Add(peerSampleIntervalHalf) - <-sampleCh - - if tagged := me.PeerTagger.count(me.Engine.tagUseful); tagged != 1 { - t.Fatalf("1 peer should be tagged, but %d were", tagged) - } - - for j := 0; j < longTermRatio; j++ { - mockClock.Add(peerSampleIntervalHalf * 2) - <-sampleCh - } - } - - if me.PeerTagger.count(me.Engine.tagUseful) == 0 { - t.Fatal("peers should still be tagged due to long-term usefulness") - } - - for j := 0; j < longTermRatio; j++ { - mockClock.Add(peerSampleIntervalHalf * 2) - <-sampleCh - } - - if me.PeerTagger.count(me.Engine.tagUseful) == 0 { - t.Fatal("peers should still be tagged due to long-term usefulness") - } - - for j := 0; j < longTermRatio; j++ { - mockClock.Add(peerSampleIntervalHalf * 2) - <-sampleCh - } - - if me.PeerTagger.count(me.Engine.tagUseful) != 0 { - t.Fatal("peers should 
finally be untagged") - } -} - -func partnerWantBlocks(e *Engine, wantBlocks []string, partner peer.ID) { - add := message.New(false) - for i, letter := range wantBlocks { - block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), int32(len(wantBlocks)-i), pb.Message_Wantlist_Block, true) - } - e.MessageReceived(context.Background(), partner, add) -} - -func partnerWantBlocksHaves(e *Engine, wantBlocks []string, wantHaves []string, sendDontHave bool, partner peer.ID) { - add := message.New(false) - priority := int32(len(wantHaves) + len(wantBlocks)) - for _, letter := range wantHaves { - block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) - priority-- - } - for _, letter := range wantBlocks { - block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Block, sendDontHave) - priority-- - } - e.MessageReceived(context.Background(), partner, add) -} - -func partnerCancels(e *Engine, keys []string, partner peer.ID) { - cancels := message.New(false) - for _, k := range keys { - block := blocks.NewBlock([]byte(k)) - cancels.Cancel(block.Cid()) - } - e.MessageReceived(context.Background(), partner, cancels) -} - -type envChan <-chan *Envelope - -func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelope) { - ctx, cancel := context.WithTimeout(context.Background(), t) - defer cancel() - - if next == nil { - next = <-e.Outbox() // returns immediately - } - - select { - case env, ok := <-next: // blocks till next envelope ready - if !ok { - log.Warnf("got closed channel") - return nil, nil - } - return nil, env - case <-ctx.Done(): - // log.Warnf("got timeout") - } - return next, nil -} - -func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { - for _, keys := range expected { - next := <-e.Outbox() - envelope := <-next - received := envelope.Message.Blocks() - // Verify payload message length - if len(received) != len(keys) { - return errors.New(fmt.Sprintln("# blocks received", len(received), "# blocks expected", len(keys))) - } - // Verify payload message contents - for _, k := range keys { - found := false - expected := blocks.NewBlock([]byte(k)) - for _, block := range received { - if block.Cid().Equals(expected.Cid()) { - found = true - break - } - } - if !found { - return errors.New(fmt.Sprintln("received", received, "expected", string(expected.RawData()))) - } - } - } - return nil -} - -func stringsComplement(set, subset []string) []string { - m := make(map[string]struct{}) - for _, letter := range subset { - m[letter] = struct{}{} - } - var complement []string - for _, letter := range set { - if _, exists := m[letter]; !exists { - complement = append(complement, letter) - } - } - return complement -} diff --git a/server/internal/decision/ewma.go b/server/internal/decision/ewma.go deleted file mode 100644 index 80d7d86b..00000000 --- a/server/internal/decision/ewma.go +++ /dev/null @@ -1,5 +0,0 @@ -package decision - -func ewma(old, new, alpha float64) float64 { - return new*alpha + (1-alpha)*old -} diff --git a/server/internal/decision/ledger.go b/server/internal/decision/ledger.go deleted file mode 100644 index 9edc2756..00000000 --- a/server/internal/decision/ledger.go +++ /dev/null @@ -1,46 +0,0 @@ -package decision - -import ( - "sync" - - wl "github.com/ipfs/go-bitswap/client/wantlist" - pb "github.com/ipfs/go-bitswap/message/pb" - - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" -) - -func 
newLedger(p peer.ID) *ledger { - return &ledger{ - wantList: wl.New(), - Partner: p, - } -} - -// Keeps the wantlist for the partner. NOT threadsafe! -type ledger struct { - // Partner is the remote Peer. - Partner peer.ID - - // wantList is a (bounded, small) set of keys that Partner desires. - wantList *wl.Wantlist - - lk sync.RWMutex -} - -func (l *ledger) Wants(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) { - log.Debugf("peer %s wants %s", l.Partner, k) - l.wantList.Add(k, priority, wantType) -} - -func (l *ledger) CancelWant(k cid.Cid) bool { - return l.wantList.Remove(k) -} - -func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { - return l.wantList.Contains(k) -} - -func (l *ledger) Entries() []wl.Entry { - return l.wantList.Entries() -} diff --git a/server/internal/decision/peer_ledger.go b/server/internal/decision/peer_ledger.go deleted file mode 100644 index c22322b2..00000000 --- a/server/internal/decision/peer_ledger.go +++ /dev/null @@ -1,46 +0,0 @@ -package decision - -import ( - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" -) - -type peerLedger struct { - cids map[cid.Cid]map[peer.ID]struct{} -} - -func newPeerLedger() *peerLedger { - return &peerLedger{cids: make(map[cid.Cid]map[peer.ID]struct{})} -} - -func (l *peerLedger) Wants(p peer.ID, k cid.Cid) { - m, ok := l.cids[k] - if !ok { - m = make(map[peer.ID]struct{}) - l.cids[k] = m - } - m[p] = struct{}{} -} - -func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) { - m, ok := l.cids[k] - if !ok { - return - } - delete(m, p) - if len(m) == 0 { - delete(l.cids, k) - } -} - -func (l *peerLedger) Peers(k cid.Cid) []peer.ID { - m, ok := l.cids[k] - if !ok { - return nil - } - peers := make([]peer.ID, 0, len(m)) - for p := range m { - peers = append(peers, p) - } - return peers -} diff --git a/server/internal/decision/scoreledger.go b/server/internal/decision/scoreledger.go deleted file mode 100644 index dbcf69d8..00000000 --- a/server/internal/decision/scoreledger.go +++ /dev/null @@ -1,353 +0,0 @@ -package decision - -import ( - "sync" - "time" - - "github.com/benbjohnson/clock" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -const ( - // the alpha for the EWMA used to track short term usefulness - shortTermAlpha = 0.5 - - // the alpha for the EWMA used to track long term usefulness - longTermAlpha = 0.05 - - // how frequently the engine should sample usefulness. Peers that - // interact every shortTerm time period are considered "active". - shortTerm = 10 * time.Second - - // long term ratio defines what "long term" means in terms of the - // shortTerm duration. Peers that interact once every longTermRatio are - // considered useful over the long term. - longTermRatio = 10 - - // long/short term scores for tagging peers - longTermScore = 10 // this is a high tag but it grows _very_ slowly. - shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. -) - -// Stores the data exchange relationship between two peers. -type scoreledger struct { - // Partner is the remote Peer. - partner peer.ID - - // tracks bytes sent... - bytesSent uint64 - - // ...and received. - bytesRecv uint64 - - // lastExchange is the time of the last data exchange. - lastExchange time.Time - - // These scores keep track of how useful we think this peer is. Short - // tracks short-term usefulness and long tracks long-term usefulness. - shortScore, longScore float64 - - // Score keeps track of the score used in the peer tagger. 
We track it - // here to avoid unnecessarily updating the tags in the connection manager. - score int - - // exchangeCount is the number of exchanges with this peer - exchangeCount uint64 - - // the record lock - lock sync.RWMutex - - clock clock.Clock -} - -// Receipt is a summary of the ledger for a given peer -// collecting various pieces of aggregated data for external -// reporting purposes. -type Receipt struct { - Peer string - Value float64 - Sent uint64 - Recv uint64 - Exchanged uint64 -} - -// Increments the sent counter. -func (l *scoreledger) AddToSentBytes(n int) { - l.lock.Lock() - defer l.lock.Unlock() - l.exchangeCount++ - l.lastExchange = l.clock.Now() - l.bytesSent += uint64(n) -} - -// Increments the received counter. -func (l *scoreledger) AddToReceivedBytes(n int) { - l.lock.Lock() - defer l.lock.Unlock() - l.exchangeCount++ - l.lastExchange = l.clock.Now() - l.bytesRecv += uint64(n) -} - -// Returns the Receipt for this ledger record. -func (l *scoreledger) Receipt() *Receipt { - l.lock.RLock() - defer l.lock.RUnlock() - - return &Receipt{ - Peer: l.partner.String(), - Value: float64(l.bytesSent) / float64(l.bytesRecv+1), - Sent: l.bytesSent, - Recv: l.bytesRecv, - Exchanged: l.exchangeCount, - } -} - -// DefaultScoreLedger is used by Engine as the default ScoreLedger. -type DefaultScoreLedger struct { - // the score func - scorePeer ScorePeerFunc - // is closed on Close - closing chan struct{} - // protects the fields immediatly below - lock sync.RWMutex - // ledgerMap lists score ledgers by their partner key. - ledgerMap map[peer.ID]*scoreledger - // how frequently the engine should sample peer usefulness - peerSampleInterval time.Duration - // used by the tests to detect when a sample is taken - sampleCh chan struct{} - clock clock.Clock -} - -// scoreWorker keeps track of how "useful" our peers are, updating scores in the -// connection manager. -// -// It does this by tracking two scores: short-term usefulness and long-term -// usefulness. Short-term usefulness is sampled frequently and highly weights -// new observations. Long-term usefulness is sampled less frequently and highly -// weights on long-term trends. -// -// In practice, we do this by keeping two EWMAs. If we see an interaction -// within the sampling period, we record the score, otherwise, we record a 0. -// The short-term one has a high alpha and is sampled every shortTerm period. -// The long-term one has a low alpha and is sampled every -// longTermRatio*shortTerm period. -// -// To calculate the final score, we sum the short-term and long-term scores then -// adjust it ±25% based on our debt ratio. Peers that have historically been -// more useful to us than we are to them get the highest score. -func (dsl *DefaultScoreLedger) scoreWorker() { - ticker := dsl.clock.Ticker(dsl.peerSampleInterval) - defer ticker.Stop() - - type update struct { - peer peer.ID - score int - } - var ( - lastShortUpdate, lastLongUpdate time.Time - updates []update - ) - - for i := 0; ; i = (i + 1) % longTermRatio { - var now time.Time - select { - case now = <-ticker.C: - case <-dsl.closing: - return - } - - // The long term update ticks every `longTermRatio` short - // intervals. - updateLong := i == 0 - - dsl.lock.Lock() - for _, l := range dsl.ledgerMap { - l.lock.Lock() - - // Update the short-term score. 
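Purely for intuition, a worked example of the two EWMA updates that follow, using the constants defined above (shortTermAlpha = 0.5, longTermAlpha = 0.05, shortTermScore = longTermScore = 10):

    // ewma(old, new, alpha) == new*alpha + (1-alpha)*old
    ewma(10, shortTermScore, shortTermAlpha) // == 10:  an active peer holds the short-term cap
    ewma(10, 0, shortTermAlpha)              // == 5:   an idle peer halves each sample (10 -> 5 -> 2.5 -> 1.25)
    ewma(10, 0, longTermAlpha)               // == 9.5: the long-term score decays far more slowly, and is
                                             //         only sampled every longTermRatio (10) short periods

So short-term usefulness disappears within a few shortTerm (10 second) samples, while long-term usefulness persists long after a peer goes quiet.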
- if l.lastExchange.After(lastShortUpdate) { - l.shortScore = ewma(l.shortScore, shortTermScore, shortTermAlpha) - } else { - l.shortScore = ewma(l.shortScore, 0, shortTermAlpha) - } - - // Update the long-term score. - if updateLong { - if l.lastExchange.After(lastLongUpdate) { - l.longScore = ewma(l.longScore, longTermScore, longTermAlpha) - } else { - l.longScore = ewma(l.longScore, 0, longTermAlpha) - } - } - - // Calculate the new score. - // - // The accounting score adjustment prefers peers _we_ - // need over peers that need us. This doesn't help with - // leeching. - var lscore float64 - if l.bytesRecv == 0 { - lscore = 0 - } else { - lscore = float64(l.bytesRecv) / float64(l.bytesRecv+l.bytesSent) - } - score := int((l.shortScore + l.longScore) * (lscore*.5 + .75)) - - // Avoid updating the connection manager unless there's a change. This can be expensive. - if l.score != score { - // put these in a list so we can perform the updates outside _global_ the lock. - updates = append(updates, update{l.partner, score}) - l.score = score - } - l.lock.Unlock() - } - dsl.lock.Unlock() - - // record the times. - lastShortUpdate = now - if updateLong { - lastLongUpdate = now - } - - // apply the updates - for _, update := range updates { - dsl.scorePeer(update.peer, update.score) - } - // Keep the memory. It's not much and it saves us from having to allocate. - updates = updates[:0] - - // Used by the tests - if dsl.sampleCh != nil { - dsl.sampleCh <- struct{}{} - } - } -} - -// Returns the score ledger for the given peer or nil if that peer -// is not on the ledger. -func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger { - // Take a read lock (as it's less expensive) to check if we have - // a ledger for the peer. - dsl.lock.RLock() - l, ok := dsl.ledgerMap[p] - dsl.lock.RUnlock() - if ok { - return l - } - return nil -} - -// Returns a new scoreledger. -func newScoreLedger(p peer.ID, clock clock.Clock) *scoreledger { - return &scoreledger{ - partner: p, - clock: clock, - } -} - -// Lazily instantiates a ledger. -func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger { - l := dsl.find(p) - if l != nil { - return l - } - - // There's no ledger, so take a write lock, then check again and - // create the ledger if necessary. - dsl.lock.Lock() - defer dsl.lock.Unlock() - l, ok := dsl.ledgerMap[p] - if !ok { - l = newScoreLedger(p, dsl.clock) - dsl.ledgerMap[p] = l - } - return l -} - -// GetReceipt returns aggregated data communication with a given peer. -func (dsl *DefaultScoreLedger) GetReceipt(p peer.ID) *Receipt { - l := dsl.find(p) - if l != nil { - return l.Receipt() - } - - // Return a blank receipt otherwise. - return &Receipt{ - Peer: p.String(), - Value: 0, - Sent: 0, - Recv: 0, - Exchanged: 0, - } -} - -// Starts the default ledger sampling process. -func (dsl *DefaultScoreLedger) Start(scorePeer ScorePeerFunc) { - dsl.init(scorePeer) - go dsl.scoreWorker() -} - -// Stops the sampling process. -func (dsl *DefaultScoreLedger) Stop() { - close(dsl.closing) -} - -// Initializes the score ledger. -func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) { - dsl.lock.Lock() - defer dsl.lock.Unlock() - dsl.scorePeer = scorePeer -} - -// Increments the sent counter for the given peer. -func (dsl *DefaultScoreLedger) AddToSentBytes(p peer.ID, n int) { - l := dsl.findOrCreate(p) - l.AddToSentBytes(n) -} - -// Increments the received counter for the given peer. 
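To make the "±25%" in the scoreWorker comment above concrete: the final score multiplies (shortScore + longScore) by lscore*0.5 + 0.75, where lscore is the fraction of traffic we received from the peer. Illustrative values only:

    // lscore = bytesRecv / (bytesRecv + bytesSent)
    // peer we only sent to:        lscore = 0.0 -> multiplier 0.75 (tagged 25% lower)
    // balanced exchange:           lscore = 0.5 -> multiplier 1.00
    // peer we only received from:  lscore ~ 1.0 -> multiplier 1.25 (tagged 25% higher)

Peers that have historically given us more than we have given them are therefore tagged more strongly in the connection manager, as the scoreWorker doc comment promises.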
-func (dsl *DefaultScoreLedger) AddToReceivedBytes(p peer.ID, n int) { - l := dsl.findOrCreate(p) - l.AddToReceivedBytes(n) -} - -// PeerConnected should be called when a new peer connects, meaning -// we should open accounting. -func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) { - dsl.lock.Lock() - defer dsl.lock.Unlock() - _, ok := dsl.ledgerMap[p] - if !ok { - dsl.ledgerMap[p] = newScoreLedger(p, dsl.clock) - } -} - -// PeerDisconnected should be called when a peer disconnects to -// clean up the accounting. -func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) { - dsl.lock.Lock() - defer dsl.lock.Unlock() - delete(dsl.ledgerMap, p) -} - -// Creates a new instance of the default score ledger. -func NewDefaultScoreLedger() *DefaultScoreLedger { - return &DefaultScoreLedger{ - ledgerMap: make(map[peer.ID]*scoreledger), - closing: make(chan struct{}), - peerSampleInterval: shortTerm, - clock: clock.New(), - } -} - -// Creates a new instance of the default score ledger with testing -// parameters. -func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) *DefaultScoreLedger { - dsl := NewDefaultScoreLedger() - dsl.peerSampleInterval = peerSampleInterval - dsl.sampleCh = sampleCh - dsl.clock = clock - return dsl -} diff --git a/server/internal/decision/taskmerger.go b/server/internal/decision/taskmerger.go deleted file mode 100644 index 191200e5..00000000 --- a/server/internal/decision/taskmerger.go +++ /dev/null @@ -1,87 +0,0 @@ -package decision - -import ( - "github.com/ipfs/go-peertaskqueue/peertask" -) - -// taskData is extra data associated with each task in the request queue -type taskData struct { - // Tasks can be want-have or want-block - IsWantBlock bool - // Whether to immediately send a response if the block is not found - SendDontHave bool - // The size of the block corresponding to the task - BlockSize int - // Whether the block was found - HaveBlock bool -} - -type taskMerger struct{} - -func newTaskMerger() *taskMerger { - return &taskMerger{} -} - -// The request queue uses this Method to decide if a newly pushed task has any -// new information beyond the tasks with the same Topic (CID) in the queue. 
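Spelled out, and as the implementation that follows shows, a pushed entry only carries new information in two cases: it upgrades a queued want-have to a want-block, or it supplies a block size where the queued entries only had DONT_HAVE. A hedged sketch of such a call (the task literals and sizes are hypothetical; field meanings are as in taskData above):

    queued := []*peertask.Task{{
        Topic:    "c1",
        Priority: 10,
        Work:     1,
        Data:     &taskData{IsWantBlock: false, HaveBlock: false, SendDontHave: true},
    }}
    incoming := peertask.Task{
        Topic:    "c1",
        Priority: 10,
        Work:     1024,
        Data:     &taskData{IsWantBlock: true, BlockSize: 1024, HaveBlock: true},
    }
    newTaskMerger().HasNewInfo(incoming, queued) // true: a want-block with a size beats a queued want-have DONT_HAVE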
-func (*taskMerger) HasNewInfo(task peertask.Task, existing []*peertask.Task) bool { - haveSize := false - isWantBlock := false - for _, et := range existing { - etd := et.Data.(*taskData) - if etd.HaveBlock { - haveSize = true - } - - if etd.IsWantBlock { - isWantBlock = true - } - } - - // If there is no active want-block and the new task is a want-block, - // the new task is better - newTaskData := task.Data.(*taskData) - if !isWantBlock && newTaskData.IsWantBlock { - return true - } - - // If there is no size information for the CID and the new task has - // size information, the new task is better - if !haveSize && newTaskData.HaveBlock { - return true - } - - return false -} - -// The request queue uses Merge to merge a newly pushed task with an existing -// task with the same Topic (CID) -func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { - newTask := task.Data.(*taskData) - existingTask := existing.Data.(*taskData) - - // If we now have block size information, update the task with - // the new block size - if !existingTask.HaveBlock && newTask.HaveBlock { - existingTask.HaveBlock = newTask.HaveBlock - existingTask.BlockSize = newTask.BlockSize - } - - // If replacing a want-have with a want-block - if !existingTask.IsWantBlock && newTask.IsWantBlock { - // Change the type from want-have to want-block - existingTask.IsWantBlock = true - // If the want-have was a DONT_HAVE, or the want-block has a size - if !existingTask.HaveBlock || newTask.HaveBlock { - // Update the entry size - existingTask.HaveBlock = newTask.HaveBlock - existing.Work = task.Work - } - } - - // If the task is a want-block, make sure the entry size is equal - // to the block size (because we will send the whole block) - if existingTask.IsWantBlock && existingTask.HaveBlock { - existing.Work = existingTask.BlockSize - } -} diff --git a/server/internal/decision/taskmerger_test.go b/server/internal/decision/taskmerger_test.go deleted file mode 100644 index eb79f156..00000000 --- a/server/internal/decision/taskmerger_test.go +++ /dev/null @@ -1,357 +0,0 @@ -package decision - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - "github.com/ipfs/go-peertaskqueue" - "github.com/ipfs/go-peertaskqueue/peertask" -) - -func TestPushHaveVsBlock(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] - - wantHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 1, - Data: &taskData{ - IsWantBlock: false, - BlockSize: 10, - HaveBlock: true, - SendDontHave: false, - }, - } - wantBlock := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 10, - Data: &taskData{ - IsWantBlock: true, - BlockSize: 10, - HaveBlock: true, - SendDontHave: false, - }, - } - - runTestCase := func(tasks []peertask.Task, expIsWantBlock bool) { - tasks = cloneTasks(tasks) - ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) - ptq.PushTasks(partner, tasks...) 
- _, popped, _ := ptq.PopTasks(100) - if len(popped) != 1 { - t.Fatalf("Expected 1 task, received %d tasks", len(popped)) - } - isWantBlock := popped[0].Data.(*taskData).IsWantBlock - if isWantBlock != expIsWantBlock { - t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, isWantBlock) - } - } - const wantBlockType = true - const wantHaveType = false - - // should ignore second want-have - runTestCase([]peertask.Task{wantHave, wantHave}, wantHaveType) - // should ignore second want-block - runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlockType) - // want-have does not overwrite want-block - runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlockType) - // want-block overwrites want-have - runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlockType) -} - -func TestPushSizeInfo(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] - - wantBlockBlockSize := 10 - wantBlockDontHaveBlockSize := 0 - wantHaveBlockSize := 10 - wantHaveDontHaveBlockSize := 0 - wantBlock := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 10, - Data: &taskData{ - IsWantBlock: true, - BlockSize: wantBlockBlockSize, - HaveBlock: true, - SendDontHave: false, - }, - } - wantBlockDontHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 2, - Data: &taskData{ - IsWantBlock: true, - BlockSize: wantBlockDontHaveBlockSize, - HaveBlock: false, - SendDontHave: false, - }, - } - wantHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 1, Data: &taskData{ - IsWantBlock: false, - BlockSize: wantHaveBlockSize, - HaveBlock: true, - SendDontHave: false, - }, - } - wantHaveDontHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 1, - Data: &taskData{ - IsWantBlock: false, - BlockSize: wantHaveDontHaveBlockSize, - HaveBlock: false, - SendDontHave: false, - }, - } - - runTestCase := func(tasks []peertask.Task, expSize int, expBlockSize int, expIsWantBlock bool) { - tasks = cloneTasks(tasks) - ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) - ptq.PushTasks(partner, tasks...) 
- _, popped, _ := ptq.PopTasks(100) - if len(popped) != 1 { - t.Fatalf("Expected 1 task, received %d tasks", len(popped)) - } - if popped[0].Work != expSize { - t.Fatalf("Expected task.Work to be %d, received %d", expSize, popped[0].Work) - } - td := popped[0].Data.(*taskData) - if td.BlockSize != expBlockSize { - t.Fatalf("Expected task.Work to be %d, received %d", expBlockSize, td.BlockSize) - } - if td.IsWantBlock != expIsWantBlock { - t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, td.IsWantBlock) - } - } - - isWantBlock := true - isWantHave := false - - // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) - runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) - // want-have (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) - runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) - // want-block with size should update existing want-block (DONT_HAVE) - runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) - // want-have with size should update existing want-block (DONT_HAVE) size, - // but leave it as a want-block (ie should not change it to want-have) - runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) - - // want-block (DONT_HAVE) size should not update existing want-block with size - runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) - // want-have (DONT_HAVE) should have no effect on existing want-block with size - runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) - // want-block with size should have no effect on existing want-block with size - runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) - // want-have with size should have no effect on existing want-block with size - runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) - - // want-block (DONT_HAVE) should update type and entry size of existing want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) - // want-have (DONT_HAVE) should have no effect on existing want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, wantHaveDontHave.Work, wantHaveDontHaveBlockSize, isWantHave) - // want-block with size should update existing want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) - // want-have with size should update existing want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) - - // want-block (DONT_HAVE) should update type and entry size of existing want-have with size - runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) - // want-have (DONT_HAVE) should not update existing want-have with size - runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, wantHave.Work, wantHaveBlockSize, isWantHave) - // want-block with size should update type and entry size of existing want-have with size - runTestCase([]peertask.Task{wantHave, wantBlock}, 
wantBlock.Work, wantBlockBlockSize, isWantBlock) - // want-have should have no effect on existing want-have - runTestCase([]peertask.Task{wantHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) -} - -func TestPushHaveVsBlockActive(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] - - wantBlock := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 10, - Data: &taskData{ - IsWantBlock: true, - BlockSize: 10, - HaveBlock: true, - SendDontHave: false, - }, - } - wantHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 1, - Data: &taskData{ - IsWantBlock: false, - BlockSize: 10, - HaveBlock: true, - SendDontHave: false, - }, - } - - runTestCase := func(tasks []peertask.Task, expCount int) { - tasks = cloneTasks(tasks) - ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) - // ptq.PushTasks(partner, tasks...) - var popped []*peertask.Task - for _, task := range tasks { - // Push the task - // tracker.PushTasks([]peertask.Task{task}) - ptq.PushTasks(partner, task) - // Pop the task (which makes it active) - _, poppedTasks, _ := ptq.PopTasks(10) - popped = append(popped, poppedTasks...) - } - if len(popped) != expCount { - t.Fatalf("Expected %d tasks, received %d tasks", expCount, len(popped)) - } - } - - // should ignore second want-have - runTestCase([]peertask.Task{wantHave, wantHave}, 1) - // should ignore second want-block - runTestCase([]peertask.Task{wantBlock, wantBlock}, 1) - // want-have does not overwrite want-block - runTestCase([]peertask.Task{wantBlock, wantHave}, 1) - // can't replace want-have with want-block because want-have is active - runTestCase([]peertask.Task{wantHave, wantBlock}, 2) -} - -func TestPushSizeInfoActive(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] - - wantBlock := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 10, - Data: &taskData{ - IsWantBlock: true, - BlockSize: 10, - HaveBlock: true, - SendDontHave: false, - }, - } - wantBlockDontHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 2, - Data: &taskData{ - IsWantBlock: true, - BlockSize: 0, - HaveBlock: false, - SendDontHave: false, - }, - } - wantHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 1, - Data: &taskData{ - IsWantBlock: false, - BlockSize: 10, - HaveBlock: true, - SendDontHave: false, - }, - } - wantHaveDontHave := peertask.Task{ - Topic: "1", - Priority: 10, - Work: 1, - Data: &taskData{ - IsWantBlock: false, - BlockSize: 0, - HaveBlock: false, - SendDontHave: false, - }, - } - - runTestCase := func(tasks []peertask.Task, expTasks []peertask.Task) { - tasks = cloneTasks(tasks) - ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) - var popped []*peertask.Task - for _, task := range tasks { - // Push the task - ptq.PushTasks(partner, task) - // Pop the task (which makes it active) - _, poppedTasks, _ := ptq.PopTasks(10) - popped = append(popped, poppedTasks...) 
- } - if len(popped) != len(expTasks) { - t.Fatalf("Expected %d tasks, received %d tasks", len(expTasks), len(popped)) - } - for i, task := range popped { - td := task.Data.(*taskData) - expTd := expTasks[i].Data.(*taskData) - if td.IsWantBlock != expTd.IsWantBlock { - t.Fatalf("Expected IsWantBlock to be %t, received %t", expTd.IsWantBlock, td.IsWantBlock) - } - if task.Work != expTasks[i].Work { - t.Fatalf("Expected Size to be %d, received %d", expTasks[i].Work, task.Work) - } - } - } - - // second want-block (DONT_HAVE) should be ignored - runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, []peertask.Task{wantBlockDontHave}) - // want-have (DONT_HAVE) should be ignored if there is existing active want-block (DONT_HAVE) - runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, []peertask.Task{wantBlockDontHave}) - // want-block with size should be added if there is existing active want-block (DONT_HAVE) - runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, []peertask.Task{wantBlockDontHave, wantBlock}) - // want-have with size should be added if there is existing active want-block (DONT_HAVE) - runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, []peertask.Task{wantBlockDontHave, wantHave}) - - // want-block (DONT_HAVE) should be added if there is existing active want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, []peertask.Task{wantHaveDontHave, wantBlockDontHave}) - // want-have (DONT_HAVE) should be ignored if there is existing active want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, []peertask.Task{wantHaveDontHave}) - // want-block with size should be added if there is existing active want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, []peertask.Task{wantHaveDontHave, wantBlock}) - // want-have with size should be added if there is existing active want-have (DONT_HAVE) - runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, []peertask.Task{wantHaveDontHave, wantHave}) - - // want-block (DONT_HAVE) should be ignored if there is existing active want-block with size - runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, []peertask.Task{wantBlock}) - // want-have (DONT_HAVE) should be ignored if there is existing active want-block with size - runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, []peertask.Task{wantBlock}) - // second want-block with size should be ignored - runTestCase([]peertask.Task{wantBlock, wantBlock}, []peertask.Task{wantBlock}) - // want-have with size should be ignored if there is existing active want-block with size - runTestCase([]peertask.Task{wantBlock, wantHave}, []peertask.Task{wantBlock}) - - // want-block (DONT_HAVE) should be added if there is existing active want-have with size - runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, []peertask.Task{wantHave, wantBlockDontHave}) - // want-have (DONT_HAVE) should be ignored if there is existing active want-have with size - runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, []peertask.Task{wantHave}) - // second want-have with size should be ignored - runTestCase([]peertask.Task{wantHave, wantHave}, []peertask.Task{wantHave}) - // want-block with size should be added if there is existing active want-have with size - runTestCase([]peertask.Task{wantHave, wantBlock}, []peertask.Task{wantHave, wantBlock}) -} - -func cloneTasks(tasks []peertask.Task) []peertask.Task { - var cp []peertask.Task - for _, t := range tasks { - td := 
t.Data.(*taskData) - cp = append(cp, peertask.Task{ - Topic: t.Topic, - Priority: t.Priority, - Work: t.Work, - Data: &taskData{ - IsWantBlock: td.IsWantBlock, - BlockSize: td.BlockSize, - HaveBlock: td.HaveBlock, - SendDontHave: td.SendDontHave, - }, - }) - } - return cp -} diff --git a/server/server.go b/server/server.go index db7733dc..a1d5cd97 100644 --- a/server/server.go +++ b/server/server.go @@ -2,175 +2,61 @@ package server import ( "context" - "errors" - "fmt" - "sort" - "sync" - "time" - "github.com/ipfs/go-bitswap/internal/defaults" - "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bmetrics "github.com/ipfs/go-bitswap/metrics" bsnet "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/server/internal/decision" "github.com/ipfs/go-bitswap/tracer" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" - "github.com/ipfs/go-metrics-interface" - process "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" - "github.com/libp2p/go-libp2p/core/peer" - "go.uber.org/zap" + libipfs "github.com/ipfs/go-libipfs/bitswap/server" ) -var provideKeysBufferSize = 2048 +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.Option instead +type Option = libipfs.Option -var log = logging.Logger("bitswap-server") -var sflog = log.Desugar() - -const provideWorkerMax = 6 - -type Option func(*Server) - -type Server struct { - sentHistogram metrics.Histogram - sendTimeHistogram metrics.Histogram - - // the engine is the bit of logic that decides who to send which blocks to - engine *decision.Engine - - // network delivers messages on behalf of the session - network bsnet.BitSwapNetwork - - // External statistics interface - tracer tracer.Tracer - - // Counters for various statistics - counterLk sync.Mutex - counters Stat - - // the total number of simultaneous threads sending outgoing messages - taskWorkerCount int - - process process.Process - - // newBlocks is a channel for newly added blocks to be provided to the - // network. 
blocks pushed down this channel get buffered and fed to the - // provideKeys channel later on to avoid too much network activity - newBlocks chan cid.Cid - // provideKeys directly feeds provide workers - provideKeys chan cid.Cid - - // Extra options to pass to the decision manager - engineOptions []decision.Option - - // the size of channel buffer to use - hasBlockBufferSize int - // whether or not to make provide announcements - provideEnabled bool -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.Server instead +type Server = libipfs.Server +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.New instead func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Server { - ctx, cancel := context.WithCancel(ctx) - - px := process.WithTeardown(func() error { - return nil - }) - go func() { - <-px.Closing() // process closes first - cancel() - }() - - s := &Server{ - sentHistogram: bmetrics.SentHist(ctx), - sendTimeHistogram: bmetrics.SendTimeHist(ctx), - taskWorkerCount: defaults.BitswapTaskWorkerCount, - network: network, - process: px, - provideEnabled: true, - hasBlockBufferSize: defaults.HasBlockBufferSize, - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - } - s.newBlocks = make(chan cid.Cid, s.hasBlockBufferSize) - - for _, o := range options { - o(s) - } - - s.engine = decision.NewEngine( - ctx, - bstore, - network.ConnectionManager(), - network.Self(), - s.engineOptions..., - ) - s.engineOptions = nil - - s.startWorkers(ctx, px) - - return s + return libipfs.New(ctx, network, bstore, options...) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.TaskWorkerCount instead func TaskWorkerCount(count int) Option { - if count <= 0 { - panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) - } - return func(bs *Server) { - bs.taskWorkerCount = count - } + return libipfs.TaskWorkerCount(count) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.WithTracer instead func WithTracer(tap tracer.Tracer) Option { - return func(bs *Server) { - bs.tracer = tap - } + return libipfs.WithTracer(tap) } // ProvideEnabled is an option for enabling/disabling provide announcements +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.ProvideEnabled instead func ProvideEnabled(enabled bool) Option { - return func(bs *Server) { - bs.provideEnabled = enabled - } + return libipfs.ProvideEnabled(enabled) } -func WithPeerBlockRequestFilter(pbrf decision.PeerBlockRequestFilter) Option { - o := decision.WithPeerBlockRequestFilter(pbrf) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.WithPeerBlockRequestFilter instead +func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { + return libipfs.WithPeerBlockRequestFilter(pbrf) } // WithTaskComparator configures custom task prioritization logic. -func WithTaskComparator(comparator decision.TaskComparator) Option { - o := decision.WithTaskComparator(comparator) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.WithTaskComparator instead +func WithTaskComparator(comparator TaskComparator) Option { + return libipfs.WithTaskComparator(comparator) } // Configures the engine to use the given score decision logic. 
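Because every exported name in this package now simply forwards to go-libipfs, downstream code migrates by swapping a single import path; a minimal sketch, assuming the surrounding ctx, network and bstore variables already exist:

    // before: bsserver "github.com/ipfs/go-bitswap/server"
    // after:
    import bsserver "github.com/ipfs/go-libipfs/bitswap/server"

    srv := bsserver.New(ctx, network, bstore,
        bsserver.ProvideEnabled(true),
        bsserver.TaskWorkerCount(8),
    )
    defer srv.Close()

Keeping the old import path also continues to compile, since Option, Server and Stat are now type aliases for their go-libipfs counterparts.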
-func WithScoreLedger(scoreLedger decision.ScoreLedger) Option { - o := decision.WithScoreLedger(scoreLedger) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } -} - -// LedgerForPeer returns aggregated data about blocks swapped and communication -// with a given peer. -func (bs *Server) LedgerForPeer(p peer.ID) *decision.Receipt { - return bs.engine.LedgerForPeer(p) +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.WithScoreLedger instead +func WithScoreLedger(scoreLedger ScoreLedger) Option { + return libipfs.WithScoreLedger(scoreLedger) } // EngineTaskWorkerCount sets the number of worker threads used inside the engine +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.EngineTaskWorkerCount instead func EngineTaskWorkerCount(count int) Option { - o := decision.WithTaskWorkerCount(count) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } + return libipfs.EngineTaskWorkerCount(count) } // SetSendDontHaves indicates what to do when the engine receives a want-block @@ -178,359 +64,35 @@ func EngineTaskWorkerCount(count int) Option { // - Send a DONT_HAVE message // - Simply don't respond // This option is only used for testing. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.SetSendDontHaves instead func SetSendDontHaves(send bool) Option { - o := decision.WithSetSendDontHave(send) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } + return libipfs.SetSendDontHaves(send) } // EngineBlockstoreWorkerCount sets the number of worker threads used for // blockstore operations in the decision engine +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.EngineBlockstoreWorkerCount instead func EngineBlockstoreWorkerCount(count int) Option { - o := decision.WithBlockstoreWorkerCount(count) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } + return libipfs.EngineBlockstoreWorkerCount(count) } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.WithTargetMessageSize instead func WithTargetMessageSize(tms int) Option { - o := decision.WithTargetMessageSize(tms) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } + return libipfs.WithTargetMessageSize(tms) } // MaxOutstandingBytesPerPeer describes approximately how much work we are will to have outstanding to a peer at any // given time. Setting it to 0 will disable any limiting. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.MaxOutstandingBytesPerPeer instead func MaxOutstandingBytesPerPeer(count int) Option { - o := decision.WithMaxOutstandingBytesPerPeer(count) - return func(bs *Server) { - bs.engineOptions = append(bs.engineOptions, o) - } + return libipfs.MaxOutstandingBytesPerPeer(count) } // HasBlockBufferSize configure how big the new blocks buffer should be. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.HasBlockBufferSize instead func HasBlockBufferSize(count int) Option { - if count < 0 { - panic("cannot have negative buffer size") - } - return func(bs *Server) { - bs.hasBlockBufferSize = count - } -} - -// WantlistForPeer returns the currently understood list of blocks requested by a -// given peer. 
-func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid { - var out []cid.Cid - for _, e := range bs.engine.WantlistForPeer(p) { - out = append(out, e.Cid) - } - return out -} - -func (bs *Server) startWorkers(ctx context.Context, px process.Process) { - bs.engine.StartWorkers(ctx, px) - - // Start up workers to handle requests from other nodes for the data on this node - for i := 0; i < bs.taskWorkerCount; i++ { - i := i - px.Go(func(px process.Process) { - bs.taskWorker(ctx, i) - }) - } - - if bs.provideEnabled { - // Start up a worker to manage sending out provides messages - px.Go(func(px process.Process) { - bs.provideCollector(ctx) - }) - - // Spawn up multiple workers to handle incoming blocks - // consider increasing number if providing blocks bottlenecks - // file transfers - px.Go(bs.provideWorker) - } -} - -func (bs *Server) taskWorker(ctx context.Context, id int) { - defer log.Debug("bitswap task worker shutting down...") - log := log.With("ID", id) - for { - log.Debug("Bitswap.TaskWorker.Loop") - select { - case nextEnvelope := <-bs.engine.Outbox(): - select { - case envelope, ok := <-nextEnvelope: - if !ok { - continue - } - - start := time.Now() - - // TODO: Only record message as sent if there was no error? - // Ideally, yes. But we'd need some way to trigger a retry and/or drop - // the peer. - bs.engine.MessageSent(envelope.Peer, envelope.Message) - if bs.tracer != nil { - bs.tracer.MessageSent(envelope.Peer, envelope.Message) - } - bs.sendBlocks(ctx, envelope) - - dur := time.Since(start) - bs.sendTimeHistogram.Observe(dur.Seconds()) - - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } -} - -func (bs *Server) logOutgoingBlocks(env *decision.Envelope) { - if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { - return - } - - self := bs.network.Self() - - for _, blockPresence := range env.Message.BlockPresences() { - c := blockPresence.Cid - switch blockPresence.Type { - case pb.Message_Have: - log.Debugw("sent message", - "type", "HAVE", - "cid", c, - "local", self, - "to", env.Peer, - ) - case pb.Message_DontHave: - log.Debugw("sent message", - "type", "DONT_HAVE", - "cid", c, - "local", self, - "to", env.Peer, - ) - default: - panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) - } - - } - for _, block := range env.Message.Blocks() { - log.Debugw("sent message", - "type", "BLOCK", - "cid", block.Cid(), - "local", self, - "to", env.Peer, - ) - } + return libipfs.HasBlockBufferSize(count) } -func (bs *Server) sendBlocks(ctx context.Context, env *decision.Envelope) { - // Blocks need to be sent synchronously to maintain proper backpressure - // throughout the network stack - defer env.Sent() - - err := bs.network.SendMessage(ctx, env.Peer, env.Message) - if err != nil { - log.Debugw("failed to send blocks message", - "peer", env.Peer, - "error", err, - ) - return - } - - bs.logOutgoingBlocks(env) - - dataSent := 0 - blocks := env.Message.Blocks() - for _, b := range blocks { - dataSent += len(b.RawData()) - } - bs.counterLk.Lock() - bs.counters.BlocksSent += uint64(len(blocks)) - bs.counters.DataSent += uint64(dataSent) - bs.counterLk.Unlock() - bs.sentHistogram.Observe(float64(env.Message.Size())) - log.Debugw("sent message", "peer", env.Peer) -} - -type Stat struct { - Peers []string - ProvideBufLen int - BlocksSent uint64 - DataSent uint64 -} - -// Stat returns aggregated statistics about bitswap operations -func (bs *Server) Stat() (Stat, error) { - bs.counterLk.Lock() - s := bs.counters - bs.counterLk.Unlock() 
- s.ProvideBufLen = len(bs.newBlocks) - - peers := bs.engine.Peers() - peersStr := make([]string, len(peers)) - for i, p := range peers { - peersStr[i] = p.Pretty() - } - sort.Strings(peersStr) - s.Peers = peersStr - - return s, nil -} - -// NotifyNewBlocks announces the existence of blocks to this bitswap service. The -// service will potentially notify its peers. -// Bitswap itself doesn't store new blocks. It's the caller responsibility to ensure -// that those blocks are available in the blockstore before calling this function. -func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { - select { - case <-bs.process.Closing(): - return errors.New("bitswap is closed") - default: - } - - // Send wanted blocks to decision engine - bs.engine.NotifyNewBlocks(blks) - - // If the reprovider is enabled, send block to reprovider - if bs.provideEnabled { - for _, blk := range blks { - select { - case bs.newBlocks <- blk.Cid(): - // send block off to be reprovided - case <-bs.process.Closing(): - return bs.process.Close() - } - } - } - - return nil -} - -func (bs *Server) provideCollector(ctx context.Context) { - defer close(bs.provideKeys) - var toProvide []cid.Cid - var nextKey cid.Cid - var keysOut chan cid.Cid - - for { - select { - case blkey, ok := <-bs.newBlocks: - if !ok { - log.Debug("newBlocks channel closed") - return - } - - if keysOut == nil { - nextKey = blkey - keysOut = bs.provideKeys - } else { - toProvide = append(toProvide, blkey) - } - case keysOut <- nextKey: - if len(toProvide) > 0 { - nextKey = toProvide[0] - toProvide = toProvide[1:] - } else { - keysOut = nil - } - case <-ctx.Done(): - return - } - } -} - -func (bs *Server) provideWorker(px process.Process) { - // FIXME: OnClosingContext returns a _custom_ context type. - // Unfortunately, deriving a new cancelable context from this custom - // type fires off a goroutine. To work around this, we create a single - // cancelable context up-front and derive all sub-contexts from that. - // - // See: https://github.com/ipfs/go-ipfs/issues/5810 - ctx := procctx.OnClosingContext(px) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - limit := make(chan struct{}, provideWorkerMax) - - limitedGoProvide := func(k cid.Cid, wid int) { - defer func() { - // replace token when done - <-limit - }() - - log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) - defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) - - ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx - defer cancel() - - if err := bs.network.Provide(ctx, k); err != nil { - log.Warn(err) - } - } - - // worker spawner, reads from bs.provideKeys until it closes, spawning a - // _ratelimited_ number of workers to handle each key. - for wid := 2; ; wid++ { - log.Debug("Bitswap.ProvideWorker.Loop") - - select { - case <-px.Closing(): - return - case k, ok := <-bs.provideKeys: - if !ok { - log.Debug("provideKeys channel closed") - return - } - select { - case <-px.Closing(): - return - case limit <- struct{}{}: - go limitedGoProvide(k, wid) - } - } - } -} - -func (bs *Server) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { - // This call records changes to wantlists, blocks received, - // and number of bytes transfered. - bs.engine.MessageReceived(ctx, p, incoming) - // TODO: this is bad, and could be easily abused. 
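The provideCollector above leans on a small Go idiom that is easy to miss: a send on a nil channel blocks forever, so it is never chosen by a select. Setting keysOut to nil therefore disables the send case until another key arrives, giving an unbounded in-memory buffer between newBlocks and provideKeys. A self-contained sketch of the same pattern with hypothetical names:

    // pump forwards values from in to out, buffering so the sender is never held up.
    func pump(in <-chan int, out chan<- int) {
        defer close(out)
        var backlog []int
        var next int
        var outCh chan<- int // nil: the send case below is disabled
        for {
            select {
            case v, ok := <-in:
                if !ok {
                    return
                }
                if outCh == nil {
                    next, outCh = v, out
                } else {
                    backlog = append(backlog, v)
                }
            case outCh <- next:
                if len(backlog) > 0 {
                    next, backlog = backlog[0], backlog[1:]
                } else {
                    outCh = nil // nothing queued, disable sends again
                }
            }
        }
    }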
- // Should only track *useful* messages in ledger - - if bs.tracer != nil { - bs.tracer.MessageReceived(p, incoming) - } -} - -// ReceivedBlocks notify the decision engine that a peer is well behaving -// and gave us usefull data, potentially increasing it's score and making us -// send them more data in exchange. -func (bs *Server) ReceivedBlocks(from peer.ID, blks []blocks.Block) { - bs.engine.ReceivedBlocks(from, blks) -} - -func (*Server) ReceiveError(err error) { - log.Infof("Bitswap Client ReceiveError: %s", err) - // TODO log the network error - // TODO bubble the network error up to the parent context/error logger - -} -func (bs *Server) PeerConnected(p peer.ID) { - bs.engine.PeerConnected(p) -} -func (bs *Server) PeerDisconnected(p peer.ID) { - bs.engine.PeerDisconnected(p) -} - -// Close is called to shutdown the Client -func (bs *Server) Close() error { - return bs.process.Close() -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/server.Stat instead +type Stat = libipfs.Stat diff --git a/testinstance/testinstance.go b/testinstance/testinstance.go index b4936996..2ea5cca5 100644 --- a/testinstance/testinstance.go +++ b/testinstance/testinstance.go @@ -2,131 +2,41 @@ package testsession import ( "context" - "time" - "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/testnet" - ds "github.com/ipfs/go-datastore" - delayed "github.com/ipfs/go-datastore/delayed" - ds_sync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - delay "github.com/ipfs/go-ipfs-delay" + libipfsbs "github.com/ipfs/go-libipfs/bitswap" + libipfsnet "github.com/ipfs/go-libipfs/bitswap/network" + libipfs "github.com/ipfs/go-libipfs/bitswap/testinstance" tnet "github.com/libp2p/go-libp2p-testing/net" - p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil" - peer "github.com/libp2p/go-libp2p/core/peer" ) // NewTestInstanceGenerator generates a new InstanceGenerator for the given // testnet -func NewTestInstanceGenerator(net tn.Network, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) InstanceGenerator { - ctx, cancel := context.WithCancel(context.Background()) - return InstanceGenerator{ - net: net, - seq: 0, - ctx: ctx, // TODO take ctx as param to Next, Instances - cancel: cancel, - bsOptions: bsOptions, - netOptions: netOptions, - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testinstance.NewTestInstanceGenerator instead +func NewTestInstanceGenerator(net tn.Network, netOptions []libipfsnet.NetOpt, bsOptions []libipfsbs.Option) InstanceGenerator { + return libipfs.NewTestInstanceGenerator(net, netOptions, bsOptions) } // InstanceGenerator generates new test instances of bitswap+dependencies -type InstanceGenerator struct { - seq int - net tn.Network - ctx context.Context - cancel context.CancelFunc - bsOptions []bitswap.Option - netOptions []bsnet.NetOpt -} - -// Close closes the clobal context, shutting down all test instances -func (g *InstanceGenerator) Close() error { - g.cancel() - return nil // for Closer interface -} - -// Next generates a new instance of bitswap + dependencies -func (g *InstanceGenerator) Next() Instance { - g.seq++ - p, err := p2ptestutil.RandTestBogusIdentity() - if err != nil { - panic("FIXME") // TODO change signature - } - return NewInstance(g.ctx, g.net, p, g.netOptions, g.bsOptions) -} - -// Instances creates N test instances of bitswap + dependencies and connects -// them to each other -func (g *InstanceGenerator) Instances(n int) []Instance { - var instances 
[]Instance - for j := 0; j < n; j++ { - inst := g.Next() - instances = append(instances, inst) - } - ConnectInstances(instances) - return instances -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testinstance.InstanceGenerator instead +type InstanceGenerator = libipfs.InstanceGenerator // ConnectInstances connects the given instances to each other +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testinstance.ConnectInstances instead func ConnectInstances(instances []Instance) { - for i, inst := range instances { - for j := i + 1; j < len(instances); j++ { - oinst := instances[j] - err := inst.Adapter.ConnectTo(context.Background(), oinst.Peer) - if err != nil { - panic(err.Error()) - } - } - } + libipfs.ConnectInstances(instances) } // Instance is a test instance of bitswap + dependencies for integration testing -type Instance struct { - Peer peer.ID - Exchange *bitswap.Bitswap - blockstore blockstore.Blockstore - Adapter bsnet.BitSwapNetwork - blockstoreDelay delay.D -} - -// Blockstore returns the block store for this test instance -func (i *Instance) Blockstore() blockstore.Blockstore { - return i.blockstore -} - -// SetBlockstoreLatency customizes the artificial delay on receiving blocks -// from a blockstore test instance. -func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { - return i.blockstoreDelay.Set(t) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testinstance.Instance instead +type Instance = libipfs.Instance // NewInstance creates a test bitswap instance. // // NB: It's easy make mistakes by providing the same peer ID to two different // instances. To safeguard, use the InstanceGenerator to generate instances. It's // just a much better idea. -func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) Instance { - bsdelay := delay.Fixed(0) - - adapter := net.Adapter(p, netOptions...) - dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) - - bstore, err := blockstore.CachedBlockstore(ctx, - blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), - blockstore.DefaultCacheOpts()) - if err != nil { - panic(err.Error()) // FIXME perhaps change signature and return error. - } - - bs := bitswap.New(ctx, adapter, bstore, bsOptions...) - - return Instance{ - Adapter: adapter, - Peer: p.ID(), - Exchange: bs, - blockstore: bstore, - blockstoreDelay: bsdelay, - } +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testinstance.NewInstance instead +func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOptions []libipfsnet.NetOpt, bsOptions []libipfsbs.Option) Instance { + return libipfs.NewInstance(ctx, net, p, netOptions, bsOptions) } diff --git a/testnet/interface.go b/testnet/interface.go index ed5c2ab7..407a7911 100644 --- a/testnet/interface.go +++ b/testnet/interface.go @@ -1,16 +1,10 @@ package bitswap import ( - bsnet "github.com/ipfs/go-bitswap/network" - - tnet "github.com/libp2p/go-libp2p-testing/net" - "github.com/libp2p/go-libp2p/core/peer" + libipfs "github.com/ipfs/go-libipfs/bitswap/testnet" ) // Network is an interface for generating bitswap network interfaces // based on a test network. 
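Both the old wrapper and the go-libipfs package it now delegates to are driven the same way; a hedged sketch of a typical test setup (import aliases are assumptions, error handling omitted):

    // tn = this repo's testnet package, testinstance = this package
    net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
    ig := testinstance.NewTestInstanceGenerator(net, nil, nil)
    defer ig.Close()

    peers := ig.Instances(2)  // two bitswap nodes, already connected to each other
    _ = peers[0].Exchange     // the node's *bitswap.Bitswap
    _ = peers[1].Blockstore() // its (possibly delayed) blockstore

The nil slices simply mean "no extra network or bitswap options", matching the NewTestInstanceGenerator signature above.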
-type Network interface { - Adapter(tnet.Identity, ...bsnet.NetOpt) bsnet.BitSwapNetwork - - HasPeer(peer.ID) bool -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.Network instead +type Network = libipfs.Network diff --git a/testnet/internet_latency_delay_generator.go b/testnet/internet_latency_delay_generator.go index 25b9f5b8..8904ef56 100644 --- a/testnet/internet_latency_delay_generator.go +++ b/testnet/internet_latency_delay_generator.go @@ -4,11 +4,10 @@ import ( "math/rand" "time" - "github.com/ipfs/go-ipfs-delay" + delay "github.com/ipfs/go-ipfs-delay" + libipfs "github.com/ipfs/go-libipfs/bitswap/testnet" ) -var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano())) - // InternetLatencyDelayGenerator generates three clusters of delays, // typical of the type of peers you would encounter on the interenet. // Given a base delay time T, the wait time generated will be either: @@ -21,6 +20,7 @@ var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano())) // the normalized distribution. // This can be used to generate a number of scenarios typical of latency // distribution among peers on the internet. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.InternetLatencyDelayGenerator instead func InternetLatencyDelayGenerator( mediumDelay time.Duration, largeDelay time.Duration, @@ -28,36 +28,5 @@ func InternetLatencyDelayGenerator( percentLarge float64, std time.Duration, rng *rand.Rand) delay.Generator { - if rng == nil { - rng = sharedRNG - } - - return &internetLatencyDelayGenerator{ - mediumDelay: mediumDelay, - largeDelay: largeDelay, - percentLarge: percentLarge, - percentMedium: percentMedium, - std: std, - rng: rng, - } -} - -type internetLatencyDelayGenerator struct { - mediumDelay time.Duration - largeDelay time.Duration - percentLarge float64 - percentMedium float64 - std time.Duration - rng *rand.Rand -} - -func (d *internetLatencyDelayGenerator) NextWaitTime(t time.Duration) time.Duration { - clusterDistribution := d.rng.Float64() - baseDelay := time.Duration(d.rng.NormFloat64()*float64(d.std)) + t - if clusterDistribution < d.percentLarge { - return baseDelay + d.largeDelay - } else if clusterDistribution < d.percentMedium+d.percentLarge { - return baseDelay + d.mediumDelay - } - return baseDelay + return libipfs.InternetLatencyDelayGenerator(mediumDelay, largeDelay, percentMedium, percentLarge, std, rng) } diff --git a/testnet/internet_latency_delay_generator_test.go b/testnet/internet_latency_delay_generator_test.go deleted file mode 100644 index dcd6a92b..00000000 --- a/testnet/internet_latency_delay_generator_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package bitswap - -import ( - "math" - "math/rand" - "testing" - "time" -) - -const testSeed = 99 - -func TestInternetLatencyDelayNextWaitTimeDistribution(t *testing.T) { - initialValue := 1000 * time.Millisecond - deviation := 100 * time.Millisecond - mediumDelay := 1000 * time.Millisecond - largeDelay := 3000 * time.Millisecond - percentMedium := 0.2 - percentLarge := 0.4 - buckets := make(map[string]int) - internetLatencyDistributionDelay := InternetLatencyDelayGenerator( - mediumDelay, - largeDelay, - percentMedium, - percentLarge, - deviation, - rand.New(rand.NewSource(testSeed))) - - buckets["fast"] = 0 - buckets["medium"] = 0 - buckets["slow"] = 0 - buckets["outside_1_deviation"] = 0 - - // strategy here is rather than mock randomness, just use enough samples to - // get approximately the distribution you'd expect - for i := 0; i < 10000; i++ { - next := 
internetLatencyDistributionDelay.NextWaitTime(initialValue) - if math.Abs((next - initialValue).Seconds()) <= deviation.Seconds() { - buckets["fast"]++ - } else if math.Abs((next - initialValue - mediumDelay).Seconds()) <= deviation.Seconds() { - buckets["medium"]++ - } else if math.Abs((next - initialValue - largeDelay).Seconds()) <= deviation.Seconds() { - buckets["slow"]++ - } else { - buckets["outside_1_deviation"]++ - } - } - totalInOneDeviation := float64(10000 - buckets["outside_1_deviation"]) - oneDeviationPercentage := totalInOneDeviation / 10000 - fastPercentageResult := float64(buckets["fast"]) / totalInOneDeviation - mediumPercentageResult := float64(buckets["medium"]) / totalInOneDeviation - slowPercentageResult := float64(buckets["slow"]) / totalInOneDeviation - - // see 68-95-99 rule for normal distributions - if math.Abs(oneDeviationPercentage-0.6827) >= 0.1 { - t.Fatal("Failed to distribute values normally based on standard deviation") - } - - if math.Abs(fastPercentageResult+percentMedium+percentLarge-1) >= 0.1 { - t.Fatal("Incorrect percentage of values distributed around fast delay time") - } - - if math.Abs(mediumPercentageResult-percentMedium) >= 0.1 { - t.Fatal("Incorrect percentage of values distributed around medium delay time") - } - - if math.Abs(slowPercentageResult-percentLarge) >= 0.1 { - t.Fatal("Incorrect percentage of values distributed around slow delay time") - } -} diff --git a/testnet/network_test.go b/testnet/network_test.go deleted file mode 100644 index 1bac2be7..00000000 --- a/testnet/network_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package bitswap - -import ( - "context" - "sync" - "testing" - - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" - - blocks "github.com/ipfs/go-block-format" - delay "github.com/ipfs/go-ipfs-delay" - mockrouting "github.com/ipfs/go-ipfs-routing/mock" - - tnet "github.com/libp2p/go-libp2p-testing/net" - "github.com/libp2p/go-libp2p/core/peer" -) - -func TestSendMessageAsyncButWaitForResponse(t *testing.T) { - net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - responderPeer := tnet.RandIdentityOrFatal(t) - waiter := net.Adapter(tnet.RandIdentityOrFatal(t)) - responder := net.Adapter(responderPeer) - - var wg sync.WaitGroup - - wg.Add(1) - - expectedStr := "received async" - - responder.Start(lambda(func( - ctx context.Context, - fromWaiter peer.ID, - msgFromWaiter bsmsg.BitSwapMessage) { - - msgToWaiter := bsmsg.New(true) - msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) - err := waiter.SendMessage(ctx, fromWaiter, msgToWaiter) - if err != nil { - t.Error(err) - } - })) - t.Cleanup(responder.Stop) - - waiter.Start(lambda(func( - ctx context.Context, - fromResponder peer.ID, - msgFromResponder bsmsg.BitSwapMessage) { - - // TODO assert that this came from the correct peer and that the message contents are as expected - ok := false - for _, b := range msgFromResponder.Blocks() { - if string(b.RawData()) == expectedStr { - wg.Done() - ok = true - } - } - - if !ok { - t.Fatal("Message not received from the responder") - } - })) - t.Cleanup(waiter.Stop) - - messageSentAsync := bsmsg.New(true) - messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) - errSending := waiter.SendMessage( - context.Background(), responderPeer.ID(), messageSentAsync) - if errSending != nil { - t.Fatal(errSending) - } - - wg.Wait() // until waiter delegate function is executed -} - -type receiverFunc func(ctx context.Context, p peer.ID, - incoming bsmsg.BitSwapMessage) - -// lambda 
returns a Receiver instance given a receiver function -func lambda(f receiverFunc) bsnet.Receiver { - return &lambdaImpl{ - f: f, - } -} - -type lambdaImpl struct { - f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) -} - -func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p peer.ID, incoming bsmsg.BitSwapMessage) { - lam.f(ctx, p, incoming) -} - -func (lam *lambdaImpl) ReceiveError(err error) { - // TODO log error -} - -func (lam *lambdaImpl) PeerConnected(p peer.ID) { - // TODO -} -func (lam *lambdaImpl) PeerDisconnected(peer.ID) { - // TODO -} diff --git a/testnet/peernet.go b/testnet/peernet.go index 8a7a6d2e..d04f0011 100644 --- a/testnet/peernet.go +++ b/testnet/peernet.go @@ -3,42 +3,14 @@ package bitswap import ( "context" - bsnet "github.com/ipfs/go-bitswap/network" - - ds "github.com/ipfs/go-datastore" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - tnet "github.com/libp2p/go-libp2p-testing/net" - "github.com/libp2p/go-libp2p/core/peer" + libipfs "github.com/ipfs/go-libipfs/bitswap/testnet" mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" ) -type peernet struct { - mockpeernet.Mocknet - routingserver mockrouting.Server -} - // StreamNet is a testnet that uses libp2p's MockNet +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.StreamNet instead func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) { - return &peernet{net, rs}, nil -} - -func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { - client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) - if err != nil { - panic(err.Error()) - } - routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) - return bsnet.NewFromIpfsHost(client, routing, opts...) -} - -func (pn *peernet) HasPeer(p peer.ID) bool { - for _, member := range pn.Mocknet.Peers() { - if p == member { - return true - } - } - return false + return libipfs.StreamNet(ctx, net, rs) } - -var _ Network = (*peernet)(nil) diff --git a/testnet/rate_limit_generators.go b/testnet/rate_limit_generators.go index 2c4a1cd5..403a5b54 100644 --- a/testnet/rate_limit_generators.go +++ b/testnet/rate_limit_generators.go @@ -2,41 +2,19 @@ package bitswap import ( "math/rand" -) -type fixedRateLimitGenerator struct { - rateLimit float64 -} + libipfs "github.com/ipfs/go-libipfs/bitswap/testnet" +) // FixedRateLimitGenerator returns a rate limit generatoe that always generates // the specified rate limit in bytes/sec. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.FixedRateLimitGenerator instead func FixedRateLimitGenerator(rateLimit float64) RateLimitGenerator { - return &fixedRateLimitGenerator{rateLimit} -} - -func (rateLimitGenerator *fixedRateLimitGenerator) NextRateLimit() float64 { - return rateLimitGenerator.rateLimit -} - -type variableRateLimitGenerator struct { - rateLimit float64 - std float64 - rng *rand.Rand + return libipfs.FixedRateLimitGenerator(rateLimit) } // VariableRateLimitGenerator makes rate limites that following a normal distribution. 
+// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.VariableRateLimitGenerator instead func VariableRateLimitGenerator(rateLimit float64, std float64, rng *rand.Rand) RateLimitGenerator { - if rng == nil { - rng = sharedRNG - } - - return &variableRateLimitGenerator{ - std: std, - rng: rng, - rateLimit: rateLimit, - } -} - -func (rateLimitGenerator *variableRateLimitGenerator) NextRateLimit() float64 { - return rateLimitGenerator.rng.NormFloat64()*rateLimitGenerator.std + rateLimitGenerator.rateLimit + return libipfs.VariableRateLimitGenerator(rateLimit, std, rng) } diff --git a/testnet/virtual.go b/testnet/virtual.go index 68f1bff4..e7214ac6 100644 --- a/testnet/virtual.go +++ b/testnet/virtual.go @@ -1,428 +1,26 @@ package bitswap import ( - "context" - "errors" - "sort" - "sync" - "sync/atomic" - "time" - - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" - - cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - tnet "github.com/libp2p/go-libp2p-testing/net" - "github.com/libp2p/go-libp2p/core/connmgr" - "github.com/libp2p/go-libp2p/core/peer" - protocol "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p/core/routing" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/libp2p/go-libp2p/p2p/protocol/ping" + libipfs "github.com/ipfs/go-libipfs/bitswap/testnet" ) // VirtualNetwork generates a new testnet instance - a fake network that // is used to simulate sending messages. +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.VirtualNetwork instead func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { - return &network{ - latencies: make(map[peer.ID]map[peer.ID]time.Duration), - clients: make(map[peer.ID]*receiverQueue), - delay: d, - routingserver: rs, - isRateLimited: false, - rateLimitGenerator: nil, - conns: make(map[string]struct{}), - } + return libipfs.VirtualNetwork(rs, d) } // RateLimitGenerator is an interface for generating rate limits across peers -type RateLimitGenerator interface { - NextRateLimit() float64 -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.RateLimitGenerator instead +type RateLimitGenerator = libipfs.RateLimitGenerator // RateLimitedVirtualNetwork generates a testnet instance where nodes are rate // limited in the upload/download speed. 
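Putting the generators together with the rate-limited network declared just below, a hedged usage sketch (all numbers are illustrative):

    rng := rand.New(rand.NewSource(42))
    // roughly 1 MiB/s per peer pair, varying with a 128 KiB/s standard deviation
    limits := VariableRateLimitGenerator(1024*1024, 128*1024, rng)
    net := RateLimitedVirtualNetwork(mockrouting.NewServer(), delay.Fixed(5*time.Millisecond), limits)
    _ = net

In the deleted implementation above, passing a nil *rand.Rand fell back to the package's shared random source.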
+// Deprecated: use github.com/ipfs/go-libipfs/bitswap/testnet.RateLimitedVirtualNetwork instead func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { - return &network{ - latencies: make(map[peer.ID]map[peer.ID]time.Duration), - rateLimiters: make(map[peer.ID]map[peer.ID]*mocknet.RateLimiter), - clients: make(map[peer.ID]*receiverQueue), - delay: d, - routingserver: rs, - isRateLimited: true, - rateLimitGenerator: rateLimitGenerator, - conns: make(map[string]struct{}), - } -} - -type network struct { - mu sync.Mutex - latencies map[peer.ID]map[peer.ID]time.Duration - rateLimiters map[peer.ID]map[peer.ID]*mocknet.RateLimiter - clients map[peer.ID]*receiverQueue - routingserver mockrouting.Server - delay delay.D - isRateLimited bool - rateLimitGenerator RateLimitGenerator - conns map[string]struct{} -} - -type message struct { - from peer.ID - msg bsmsg.BitSwapMessage - shouldSend time.Time -} - -// receiverQueue queues up a set of messages to be sent, and sends them *in -// order* with their delays respected as much as sending them in order allows -// for -type receiverQueue struct { - receiver *networkClient - queue []*message - active bool - lk sync.Mutex -} - -func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { - n.mu.Lock() - defer n.mu.Unlock() - - s := bsnet.Settings{ - SupportedProtocols: []protocol.ID{ - bsnet.ProtocolBitswap, - bsnet.ProtocolBitswapOneOne, - bsnet.ProtocolBitswapOneZero, - bsnet.ProtocolBitswapNoVers, - }, - } - for _, opt := range opts { - opt(&s) - } - - client := &networkClient{ - local: p.ID(), - network: n, - routing: n.routingserver.Client(p), - supportedProtocols: s.SupportedProtocols, - } - n.clients[p.ID()] = &receiverQueue{receiver: client} - return client -} - -func (n *network) HasPeer(p peer.ID) bool { - n.mu.Lock() - defer n.mu.Unlock() - - _, found := n.clients[p] - return found -} - -// TODO should this be completely asynchronous? -// TODO what does the network layer do with errors received from services? -func (n *network) SendMessage( - ctx context.Context, - from peer.ID, - to peer.ID, - mes bsmsg.BitSwapMessage) error { - - mes = mes.Clone() - - n.mu.Lock() - defer n.mu.Unlock() - - latencies, ok := n.latencies[from] - if !ok { - latencies = make(map[peer.ID]time.Duration) - n.latencies[from] = latencies - } - - latency, ok := latencies[to] - if !ok { - latency = n.delay.NextWaitTime() - latencies[to] = latency - } - - var bandwidthDelay time.Duration - if n.isRateLimited { - rateLimiters, ok := n.rateLimiters[from] - if !ok { - rateLimiters = make(map[peer.ID]*mocknet.RateLimiter) - n.rateLimiters[from] = rateLimiters - } - - rateLimiter, ok := rateLimiters[to] - if !ok { - rateLimiter = mocknet.NewRateLimiter(n.rateLimitGenerator.NextRateLimit()) - rateLimiters[to] = rateLimiter - } - - size := mes.ToProtoV1().Size() - bandwidthDelay = rateLimiter.Limit(size) - } else { - bandwidthDelay = 0 - } - - receiver, ok := n.clients[to] - if !ok { - return errors.New("cannot locate peer on network") - } - - // nb: terminate the context since the context wouldn't actually be passed - // over the network in a real scenario - - msg := &message{ - from: from, - msg: mes, - shouldSend: time.Now().Add(latency).Add(bandwidthDelay), - } - receiver.enqueue(msg) - - return nil -} - -var _ bsnet.Receiver = (*networkClient)(nil) - -type networkClient struct { - // These need to be at the top of the struct (allocated on the heap) for alignment on 32bit platforms. 
- stats bsnet.Stats - - local peer.ID - receivers []bsnet.Receiver - network *network - routing routing.Routing - supportedProtocols []protocol.ID -} - -func (nc *networkClient) ReceiveMessage(ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) { - for _, v := range nc.receivers { - v.ReceiveMessage(ctx, sender, incoming) - } -} - -func (nc *networkClient) ReceiveError(e error) { - for _, v := range nc.receivers { - v.ReceiveError(e) - } -} - -func (nc *networkClient) PeerConnected(p peer.ID) { - for _, v := range nc.receivers { - v.PeerConnected(p) - } -} -func (nc *networkClient) PeerDisconnected(p peer.ID) { - for _, v := range nc.receivers { - v.PeerDisconnected(p) - } -} - -func (nc *networkClient) Self() peer.ID { - return nc.local -} - -func (nc *networkClient) Ping(ctx context.Context, p peer.ID) ping.Result { - return ping.Result{RTT: nc.Latency(p)} -} - -func (nc *networkClient) Latency(p peer.ID) time.Duration { - nc.network.mu.Lock() - defer nc.network.mu.Unlock() - return nc.network.latencies[nc.local][p] -} - -func (nc *networkClient) SendMessage( - ctx context.Context, - to peer.ID, - message bsmsg.BitSwapMessage) error { - if err := nc.network.SendMessage(ctx, nc.local, to, message); err != nil { - return err - } - atomic.AddUint64(&nc.stats.MessagesSent, 1) - return nil -} - -func (nc *networkClient) Stats() bsnet.Stats { - return bsnet.Stats{ - MessagesRecvd: atomic.LoadUint64(&nc.stats.MessagesRecvd), - MessagesSent: atomic.LoadUint64(&nc.stats.MessagesSent), - } -} - -// FindProvidersAsync returns a channel of providers for the given key. -func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - // NB: this function duplicates the AddrInfo -> ID transformation in the - // bitswap network adapter. Not to worry. This network client will be - // deprecated once the ipfsnet.Mock is added. The code below is only - // temporary. - - out := make(chan peer.ID) - go func() { - defer close(out) - providers := nc.routing.FindProvidersAsync(ctx, k, max) - for info := range providers { - select { - case <-ctx.Done(): - case out <- info.ID: - } - } - }() - return out -} - -func (nc *networkClient) ConnectionManager() connmgr.ConnManager { - return &connmgr.NullConnMgr{} -} - -type messagePasser struct { - net *networkClient - target peer.ID - local peer.ID - ctx context.Context -} - -func (mp *messagePasser) SendMsg(ctx context.Context, m bsmsg.BitSwapMessage) error { - return mp.net.SendMessage(ctx, mp.target, m) -} - -func (mp *messagePasser) Close() error { - return nil -} - -func (mp *messagePasser) Reset() error { - return nil -} - -var oldProtos = map[protocol.ID]struct{}{ - bsnet.ProtocolBitswapNoVers: {}, - bsnet.ProtocolBitswapOneZero: {}, - bsnet.ProtocolBitswapOneOne: {}, -} - -func (mp *messagePasser) SupportsHave() bool { - protos := mp.net.network.clients[mp.target].receiver.supportedProtocols - for _, proto := range protos { - if _, ok := oldProtos[proto]; !ok { - return true - } - } - return false -} - -func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID, opts *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { - return &messagePasser{ - net: nc, - target: p, - local: nc.local, - ctx: ctx, - }, nil -} - -// Provide provides the key to the network. 
-func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { - return nc.routing.Provide(ctx, k, true) -} - -func (nc *networkClient) Start(r ...bsnet.Receiver) { - nc.receivers = r -} - -func (nc *networkClient) Stop() { -} - -func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { - nc.network.mu.Lock() - otherClient, ok := nc.network.clients[p] - if !ok { - nc.network.mu.Unlock() - return errors.New("no such peer in network") - } - - tag := tagForPeers(nc.local, p) - if _, ok := nc.network.conns[tag]; ok { - nc.network.mu.Unlock() - // log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") - return nil - } - nc.network.conns[tag] = struct{}{} - nc.network.mu.Unlock() - - otherClient.receiver.PeerConnected(nc.local) - nc.PeerConnected(p) - return nil -} - -func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { - nc.network.mu.Lock() - defer nc.network.mu.Unlock() - - otherClient, ok := nc.network.clients[p] - if !ok { - return errors.New("no such peer in network") - } - - tag := tagForPeers(nc.local, p) - if _, ok := nc.network.conns[tag]; !ok { - // Already disconnected - return nil - } - delete(nc.network.conns, tag) - - otherClient.receiver.PeerDisconnected(nc.local) - nc.PeerDisconnected(p) - return nil -} - -func (rq *receiverQueue) enqueue(m *message) { - rq.lk.Lock() - defer rq.lk.Unlock() - rq.queue = append(rq.queue, m) - if !rq.active { - rq.active = true - go rq.process() - } -} - -func (rq *receiverQueue) Swap(i, j int) { - rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i] -} - -func (rq *receiverQueue) Len() int { - return len(rq.queue) -} - -func (rq *receiverQueue) Less(i, j int) bool { - return rq.queue[i].shouldSend.UnixNano() < rq.queue[j].shouldSend.UnixNano() -} - -func (rq *receiverQueue) process() { - for { - rq.lk.Lock() - sort.Sort(rq) - if len(rq.queue) == 0 { - rq.active = false - rq.lk.Unlock() - return - } - m := rq.queue[0] - if time.Until(m.shouldSend).Seconds() < 0.1 { - rq.queue = rq.queue[1:] - rq.lk.Unlock() - time.Sleep(time.Until(m.shouldSend)) - atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) - rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) - } else { - rq.lk.Unlock() - time.Sleep(100 * time.Millisecond) - } - } -} - -func tagForPeers(a, b peer.ID) string { - if a < b { - return string(a + b) - } - return string(b + a) + return libipfs.RateLimitedVirtualNetwork(rs, d, rateLimitGenerator) } diff --git a/tracer/tracer.go b/tracer/tracer.go index af1d39d8..7169813d 100644 --- a/tracer/tracer.go +++ b/tracer/tracer.go @@ -1,13 +1,10 @@ package tracer import ( - bsmsg "github.com/ipfs/go-bitswap/message" - peer "github.com/libp2p/go-libp2p/core/peer" + libipfs "github.com/ipfs/go-libipfs/bitswap/tracer" ) // Tracer provides methods to access all messages sent and received by Bitswap. // This interface can be used to implement various statistics (this is original intent). 
-type Tracer interface { - MessageReceived(peer.ID, bsmsg.BitSwapMessage) - MessageSent(peer.ID, bsmsg.BitSwapMessage) -} +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/tracer.Tracer instead +type Tracer = libipfs.Tracer diff --git a/wantlist/forward.go b/wantlist/forward.go index c7eba707..320c5e2d 100644 --- a/wantlist/forward.go +++ b/wantlist/forward.go @@ -1,23 +1,23 @@ package wantlist import ( - "github.com/ipfs/go-bitswap/client/wantlist" "github.com/ipfs/go-cid" + libipfs "github.com/ipfs/go-libipfs/bitswap/wantlist" ) type ( - // DEPRECATED use wantlist.Entry instead - Entry = wantlist.Entry - // DEPRECATED use wantlist.Wantlist instead - Wantlist = wantlist.Wantlist + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/wantlist.Entry instead + Entry = libipfs.Entry + // Deprecated: use github.com/ipfs/go-libipfs/bitswap/wantlist.Wantlist instead + Wantlist = libipfs.Wantlist ) -// DEPRECATED use wantlist.New instead +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/wantlist.New instead func New() *Wantlist { - return wantlist.New() + return libipfs.New() } -// DEPRECATED use wantlist.NewRefEntry instead +// Deprecated: use github.com/ipfs/go-libipfs/bitswap/wantlist.NewRefEntry instead func NewRefEntry(c cid.Cid, p int32) Entry { - return wantlist.NewRefEntry(c, p) + return libipfs.NewRefEntry(c, p) }
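
Migration note: every deprecated symbol in this patch is a one-line forward to the identically named symbol in go-libipfs, so downstream code can switch import paths without behaviour changes. The sketch below shows what a consumer of the testnet and wantlist packages might look like after migrating; import paths and function signatures are taken from the diff above, while the surrounding scaffolding (the package name, helper functions, the 10 ms latency and 1 MiB/s rate limit) is illustrative only, not part of this change.

package example // hypothetical downstream package, shown only for illustration

import (
	"time"

	delay "github.com/ipfs/go-ipfs-delay"
	mockrouting "github.com/ipfs/go-ipfs-routing/mock"
	testnet "github.com/ipfs/go-libipfs/bitswap/testnet"
	wantlist "github.com/ipfs/go-libipfs/bitswap/wantlist"
)

// buildVirtualNets constructs the fake networks directly from the go-libipfs
// testnet package instead of going through the deprecated go-bitswap forwarders.
func buildVirtualNets() {
	rs := mockrouting.NewServer()           // in-memory routing server
	d := delay.Fixed(10 * time.Millisecond) // fixed per-message latency (example value)

	// Same signatures as the wrappers replaced above.
	vnet := testnet.VirtualNetwork(rs, d)
	rlnet := testnet.RateLimitedVirtualNetwork(rs, d, testnet.FixedRateLimitGenerator(1<<20)) // ~1 MiB/s
	_, _ = vnet, rlnet
}

// newWantlist replaces calls to the forwarding wantlist/forward.go shim.
func newWantlist() *wantlist.Wantlist {
	return wantlist.New()
}

Because the shims remain in place, this switch can be made file by file; only the import path changes, and the deprecated wrappers keep compiling until go-bitswap itself is retired.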