-
Notifications
You must be signed in to change notification settings - Fork 264
/
write.go
116 lines (99 loc) · 2.89 KB
/
write.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
package ipld
import (
"context"
"fmt"
"math"
stdsync "sync"
ipld "github.com/ipfs/go-ipld-format"
"github.com/lazyledger/nmt"
"github.com/lazyledger/rsmt2d"
"github.com/libp2p/go-libp2p-core/routing"
"github.com/lazyledger/lazyledger-core/ipfs/plugin"
"github.com/lazyledger/lazyledger-core/libs/log"
"github.com/lazyledger/lazyledger-core/p2p/ipld/wrapper"
"github.com/lazyledger/lazyledger-core/types"
)
// PutBlock posts and pins erasured block data to IPFS using the provided
// ipld.NodeAdder, and announces (provides) the row and column roots to the
// content routing system. Note: the erasured data is currently recomputed.
//
// It returns nil without doing anything when the block carries no shares.
// The returned error is either a failure to recompute the extended data
// square, the first provide error reported by a worker, or a batch-commit
// error from IPFS.
func PutBlock(
	ctx context.Context,
	adder ipld.NodeAdder,
	block *types.Block,
	croute routing.ContentRouting,
	logger log.Logger,
) error {
	// recompute the shares
	// NOTE(review): the second return value of ComputeShares is discarded —
	// confirm it is not an error in this version of types.Data.
	namespacedShares, _ := block.Data.ComputeShares()
	shares := namespacedShares.RawShares()
	// don't do anything if there is no data to put on IPFS
	if len(shares) == 0 {
		return nil
	}
	// create nmt adder wrapping batch adder
	batchAdder := NewNmtNodeAdder(ctx, ipld.NewBatch(ctx, adder))
	// create the nmt wrapper to generate row and col commitments
	squareSize := uint32(math.Sqrt(float64(len(shares))))
	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(squareSize), nmt.NodeVisitor(batchAdder.Visit))
	// recompute the eds
	eds, err := rsmt2d.ComputeExtendedDataSquare(shares, rsmt2d.NewRSGF8Codec(), tree.Constructor)
	if err != nil {
		return fmt.Errorf("failure to recompute the extended data square: %w", err)
	}
	// setup provide workers
	const workers = 32
	var wg stdsync.WaitGroup
	jobs := make(chan []byte, workers)
	// one slot per worker: every worker must be able to report an error
	// without blocking, otherwise a second failure would hang on the send
	// and wg.Wait below would deadlock
	errc := make(chan error, workers)
	workerCtx, workerCancel := context.WithCancel(ctx)
	defer workerCancel() // ensure that this cancel is called
	// start feeding the workers jobs
	go feedJobs(workerCtx, jobs, eds.RowRoots(), eds.ColumnRoots())
	// start each worker
	for i := 0; i < workers; i++ {
		wg.Add(1)
		worker := provideWorker{croute, logger, workerCancel}
		go worker.provide(workerCtx, &wg, jobs, errc)
	}
	// wait for all jobs to finish or for an error to occur
	wg.Wait()
	err = collectErrors(errc)
	if err != nil {
		return err
	}
	// commit the batch to ipfs
	return batchAdder.Commit()
}
// collectErrors closes errc and returns the first buffered error, if any.
// Once the channel is closed, a receive on an empty buffer yields nil, so
// the happy path (no worker reported anything) returns nil.
func collectErrors(errc chan error) error {
	close(errc)
	return <-errc
}
// provideWorker announces root CIDs to the content routing system on
// behalf of PutBlock.
type provideWorker struct {
	croute routing.ContentRouting // routing used to Provide each root CID
	logger log.Logger             // not referenced by provide — presumably kept for future logging; TODO confirm
	cancel context.CancelFunc     // cancels the shared worker context when a provide fails
}
func feedJobs(ctx context.Context, jobs chan<- []byte, roots ...[][]byte) {
defer close(jobs)
for _, rootSet := range roots {
for _, root := range rootSet {
select {
case <-ctx.Done():
return
default:
jobs <- root
}
}
}
}
// provide announces each root CID received on jobs to the content routing
// system. On a provide failure it cancels the shared worker context (which
// stops feedJobs and lets jobs drain to close) and reports the error
// without blocking, so multiple failing workers can never deadlock the
// WaitGroup in PutBlock even when errc's buffer is full.
func (w *provideWorker) provide(ctx context.Context, wg *stdsync.WaitGroup, jobs <-chan []byte, errc chan<- error) {
	defer wg.Done()
	for job := range jobs {
		rootCid := plugin.MustCidFromNamespacedSha256(job)
		if err := w.croute.Provide(ctx, rootCid, true); err != nil {
			w.cancel()
			// non-blocking send: the first reported error wins; drop the
			// rest rather than blocking forever on a full buffer
			select {
			case errc <- err:
			default:
			}
		}
	}
}